@Preamble{
"\hyphenation{ }" #
"\ifx \undefined \circled \def \circled #1{(#1)}\fi" #
"\ifx \undefined \reg \def \reg {\circled{R}}\fi"
}
@String{ack-nhfb = "Nelson H. F. Beebe,
University of Utah,
Department of Mathematics, 110 LCB,
155 S 1400 E RM 233,
Salt Lake City, UT 84112-0090, USA,
Tel: +1 801 581 5254,
e-mail: \path|beebe@math.utah.edu|,
\path|beebe@acm.org|,
\path|beebe@computer.org| (Internet),
URL: \path|https://www.math.utah.edu/~beebe/|"}
@String{j-SIGMETRICS = "ACM SIGMETRICS Performance Evaluation Review"}
@String{pub-ACM = "ACM Press"}
@String{pub-ACM:adr = "New York, NY 10036, USA"}
@Article{Keirstead:1972:STC,
author = "Ralph E. Keirstead and Donn B. Parker",
title = "Software testing and certification",
journal = j-SIGMETRICS,
volume = "1",
number = "1",
pages = "3--8",
month = mar,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041596.1041597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:42 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Society needs a continuous flow of upgrading products
and services which are responsive to needs, are
reliable, cost-effective and safe. When this does not
occur, excessive regulation and resulting stifled
technology and production results. Excesses in both
directions have occurred in other fields such as
medicine, the automobile industry, petro-chemicals,
motion pictures, building construction and
pharmaceuticals. Disasters based on poor design and
implementation in information processing have occurred
in ballot-counting systems, law enforcement systems,
billing systems, credit systems and dating services.
Business has been undersold and oversold and sometimes
reached the brink of ruin in its increasing reliance on
computer systems. The only answer is a balanced degree
of self-regulation. Such self-regulation for software
systems is presented here.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bell:1972:CME,
author = "Thomas E. Bell",
title = "Computer measurement and evaluation: artistry, or
science?",
journal = j-SIGMETRICS,
volume = "1",
number = "2",
pages = "4--10",
month = jun,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113640.1113641",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Effort invested in computer measurement and evaluation
is clearly increasing, but the results of this
increasing investment may be unfortunate. The
undeniable value of the results and the enthusiasm of
participants may be leading to unrealizable
expectations. The present artistry needs to be
converted into a science for achieving a solid future;
the most fruitful direction may be the synthesis of
individual, empirical discoveries combined with testing
hypotheses about performance relationships.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Palme:1972:BGM,
author = "Jacob Palme",
title = "Beware of the {Gibson} mix",
journal = j-SIGMETRICS,
volume = "1",
number = "2",
pages = "10--11",
month = jun,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113640.1113642",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Evaluation of computer systems is sometimes made using
a so-called Gibson mix. This is a list of common
machine instructions with weights depending on how
often they are supposed to occur in typical programs.
By using these weights to estimate the mean instruction
execution time, the `speed' of a computer system is
supposed to be measured.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Johnson:1972:SST,
author = "Robert R. Johnson",
title = "Some steps toward an information system performance
theory",
journal = j-SIGMETRICS,
volume = "1",
number = "3",
pages = "4--15",
month = sep,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041599.1041600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A means for representing information handling systems
at the problem, program, and computer level is
presented. This means, Petri Nets, coupled with
classical information theory, provides quantitative
measures of system capacity and thruput as well
measures of `the work done.' Concepts of
information-capacity and of information-work are
derived from these probabilistically labeled Petri Nets
based on analogies to thermodynamics. Thruput is
measured as information-gain. Comments are made about
the possible significance of these concepts, their
relationship to classical thermodynamics, and the
directions of continuing thought stimulated by these
concepts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kernighan:1972:CAO,
  author =       "B. W. Kernighan and P. J. Plauger",
title = "On comparing apples and oranges, or, my machine is
better than your machine",
journal = j-SIGMETRICS,
volume = "1",
number = "3",
pages = "16--20",
month = sep,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041599.1041601",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a recent comparison test, six computer
manufacturers were asked to code a particular program
loop to run as quickly as possible on their machine.
Presumably conclusions about the merits of the machines
were to be drawn from the resulting code. We have
                 reduced the number of instructions for the loop by an
average of one instruction per machine, a 15\%
decrease. It appears that conclusions might more
appropriately be drawn about manufacturers' software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lynch:1972:DDA,
author = "W. C. Lynch",
title = "Do disk arms move?",
journal = j-SIGMETRICS,
volume = "1",
number = "4",
pages = "3--16",
month = dec,
year = "1972",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041603.1041604",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:54 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement of the lengths of disk arm movements in a
2314 disk storage facility of an IBM 360/67 operating
under the Michigan Terminal System yielded the
unexpected data that the arms need not move in 63\% of
the accesses and need move for an average of only 30ms.
in the remaining 37\% of the cases. A description and
analysis of a possible mechanism of action is
presented. The predictions of this model do not
disagree with the measured data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Halstead:1973:LLM,
author = "M. H. Halstead",
title = "Language level, a missing concept in information
theory",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "7--9",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041607",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "According to Information Theory, [Cf Leon Brillouin,
Science and Information Theory, Academic Press, N. Y.
1956, pp. 292-3], the information content of a table of
numbers does not depend upon how difficult it was to
obtain the entries in the table, but only upon whether
or not we know how, or how precisely we know how, to
reconstruct the entire table or any parts of it.
Consequently, from present Information Theory, since we
                 `know in advance' how a table of sines is constructed,
such a table contains absolutely no information. For a
person who does not `know in advance' how to construct
a table of sines, however, the table would indeed
contain `Information.' This ambiguity apparently
contradicts the basic statement [Leon Brillouin, op.
cit., page 10] that `Information is an absolute
quantity which has the same numerical value for any
observer,' a contradiction which remains even when we
accept Brillouin's next statement that `The human value
of the information, on the other hand, would
necessarily be a relative quantity, and would have
different values for different observers, according to
the possibility of their understanding it and using it
later.'",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Halstead:1973:EDP,
author = "M. H. Halstead",
title = "An experimental determination of the `purity' of a
trivial algorithm",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "10--15",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041608",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work in an area which might be designated as
Software Physics [1,2,3,4,5,6] has suggested that the
basic structure of algorithms may offer an interesting
field for experimental research. Such an experiment is
reported here. In an earlier paper [2], it was
                 suggested that a `Second Law' might be stated as: `The
internal quality, LV, of a pure algorithm is
independent of the language in which it is
expressed.'",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1973:RSC,
author = "Peter J. Denning",
title = "Review of {`Statistical Computer Performance
                 Evaluation' by Walter Freiberger; Academic Press
(1972)}",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "16--22",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041611",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is the proceedings of a conference held at
Brown University on November 22-23, 1971. The editors
state that only papers dealing with real data in a
reasonably sophisticated manner were accepted for the
conference. Papers dealing simply with the collection
of data, or with queueing-theoretic models, were
excluded. The papers are grouped into seven sections
corresponding to the seven sessions at the conference;
at the end of each section is a brief statement by the
one or two discussants of that session's papers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Svobodova:1973:CSN,
author = "Liba Svobodova",
title = "Communications: Some notes on the {Computer Synectics}
                 hardware monitor {SUM}",
journal = j-SIGMETRICS,
volume = "2",
number = "1",
pages = "23--25",
month = mar,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041606.1041609",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:49:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The longer I have been working with the hardware
monitor SUM, a device designed and manufactured by the
Computer Synectics, the less I have been pleased.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ishida:1973:JSU,
author = "Haruhisa Ishida and Nobumasa Takahashi",
title = "Job statistics at a 2000-user university computer
center",
journal = j-SIGMETRICS,
volume = "2",
number = "2",
pages = "2--13",
month = jun,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113644.1113645",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Computer Centre at the University of Tokyo is one
of 7 large university centers serving researchers
throughout Japan; it processes 120,000 jobs annually
submitted by 2,000 academic users in various research
institutions. A brief comparison of the 7 centers and
the breakdown of users are shown. To clarify the job
characteristics of these users, account data of all
jobs in an entire year were analyzed and the results
are presented. They are shown in terms of the
distribution of CPU time, numbers of input cards/output
pages/output cards, program size, job end conditions
and turnaround time etc. A special on-line card punch
is mentioned which punches holes in the 13th row to
separate output card decks. It was found that, when the
CPU speed was increased 8 times after replacement under
the same operating system, the average job size was
increased 4 times. Hence only twice as many jobs could
be processed. The results of analysis have been used
for systems performance evaluation (for example, the
CPU busy-rate was found to be 69\%), improvement and
for an input job model used in planning for the next
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rice:1973:AMC,
author = "Don R. Rice",
title = "An analytical model for computer system performance
evaluation",
journal = j-SIGMETRICS,
volume = "2",
number = "2",
pages = "14--30",
month = jun,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113644.1113646",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an analytical model of a computer
system useful in the evaluation of system performance.
The model is described in detail while the mathematics
are minimized. Emphasis is placed on the utility of the
model rather than the underlying theory and a number of
illustrative examples are included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kolence:1973:SE,
author = "Kenneth W. Kolence",
title = "The software empiricist",
journal = j-SIGMETRICS,
volume = "2",
number = "2",
pages = "31--36",
month = jun,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113644.1113647",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The advent of software and hardware monitoring
technology has presented us with a flood of data,
without bringing commensurate understanding by which to
interpret it. Thus, the most important problem before
us in the field of computer measurement is to discover
the relationships between the variables we measure and
the overall system properties of interest.
Particularly, we wish to be able to predict system
behavior and performance from a knowledge of the values
of factors under our control. In this way, not only
will we understand the meanings of these variables, but
we shall learn how to design our systems to perform as
we wish them to. The latter is a prime goal of software
engineering, the former the rational of what has been
called software physics. In this section of the Review
we are and shall be interested in the empirical
development of such an understanding, and the
experimental aspects of computer measurement. Our
intent is to assist in the building of a solid body of
knowledge by providing a publication vehicle for
empirical and experimental data. That is, we have
little interest in publishing theory, which can
normally be done elsewhere. Our goal is to publish
experimental data to support or refute theory, and
empirical data from which theory builders may take
their inspiration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kolence:1973:SUP,
author = "Kenneth W. Kolence and Philip J. Kiviat",
title = "Software unit profiles \& {Kiviat} figures",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "2--12",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041614",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the June, 1973 issue of the {\em Performance
Evaluation Review}, the concept of using circular
graphs (called Kiviat graphs by Kolence) to present
system performance data was introduced in the column
{\em The Software Empiricist}. In this article we wish
to report on some recent work in using such graphs to
present system and program profiles in a strikingly
visual way of potential use to all practitioners of
computer measurement. In discussing this data, we find
it necessary to comment on the meaning of the variables
used for such profiles in a way which also should be of
interest to practitioners.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1973:WOA,
author = "Peter J. Denning",
title = "Why our approach to performance evaluation is
{SDRAWKCAB}",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "13--16",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041615",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What does SDRAWKCAB mean? Some of you already know;
some I have told; some have guessed. But many do not
know. Those who do know, know it would be contrary to
the theme of SDRAWKCAB to tell you immediately what it
means, although it certainly would make things much
easier if I told you now.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Beck:1973:CSL,
author = "Norman Beck and Gordon Ashby",
title = "On cost of static linking and loading of subprograms",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "17--20",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041616",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to report some data
concerning cost in CPU processing due to loading
programs. The data was collected on a PDP-10, using
modifications made by the linking loader to the
prologue generated for FORTRAN complied programs, by
the addition of one UUO (a programmed operation similar
to an SVC on IBM 360/370), and several cells in the
monitor used as counters. The data covers the number of
programs loaded and the CPU ms expended loading them.
This data is broken down between programs that were
loaded and never entered and programs loaded and
eventually executed. It is further classified according
to periods of heavy use for program development and
periods of heavy production use.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kolence:1973:SEE,
author = "Ken Kolence",
title = "The software empiricist experimental disciplines \&
computer measurements",
journal = j-SIGMETRICS,
volume = "2",
number = "3",
pages = "21--23",
month = sep,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041613.1041617",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The introduction and use of the capability for
quantitative measurements into the field of computer
science must inexorably lead to the development and use
of experimental approaches and techniques to discover,
understand, and verify relationships between the
observables of what is today loosely called computer
performance. The reason for this column appearing as a
regular feature in PER is to assist in the process of
bridging the gap in both directions between the
practitioners and theorists of the field. In the first
column in this series, we introduced the concepts of
empiricism and the initial discoveries of invariances
of values as foundations of this new aspect of computer
science. With this issue, we shall begin to investigate
the requirements and methodologies by which this
approach can be applied to the common benefit of both
the practical and theoretical orientations. When a
particular topic can be demonstrated with actual data
or equivalent means, it will be the topic of a separate
article.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1973:UHM,
author = "James Hughes and David Cronshaw",
title = "On using a hardware monitor as an intelligent
peripheral",
journal = j-SIGMETRICS,
volume = "2",
number = "4",
pages = "3--19",
month = dec,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113650.1113651",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:20 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Conventionally, hardware monitoring has been performed
using manually controlled off-line devices. It is
suggested that a hardware monitor incorporating program
control and acting as an intelligent peripheral device
would realize greater utility and wider application.
The development and application of such a device is
described; a combination of the merits of both software
and hardware monitoring techniques is claimed for it.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Svobodova:1973:MCS,
author = "Liba Svobodova",
title = "Measuring computer system utilization with a hardware
and a hybrid monitor",
journal = j-SIGMETRICS,
volume = "2",
number = "4",
pages = "20--34",
month = dec,
year = "1973",
CODEN = "????",
DOI = "https://doi.org/10.1145/1113650.1113652",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:20 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer system utilization is generally measured in
terms of the utilization of individual system
components and the overlap of activities of two or more
system components. This type of data can be used to
construct a system performance profile [BONN 69, COCI
71, SUM 70]. Utilization of a system component is
obtained as the ratio (unit busy time)/(total elapsed
time). If a particular unit performs more than one type
of operation, the unit busy time may be further divided
into portions corresponding to different activities and
an activity profile can be constructed for each such
unit. For a storage unit, information about utilization
of different portions of storage might be desirable in
addition to utilization of this unit as a whole. A
                 space utilization profile can be developed in this
case. To cover both cases, the term unit utilization
profile is used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wortman:1974:NHR,
author = "David B. Wortman",
title = "A note on high resolution timing",
journal = j-SIGMETRICS,
volume = "3",
number = "1",
pages = "3--9",
month = mar,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041619.1041620",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ability to accurately time the execution of
sequences of machine instructions is an important tool
in the tuning and evaluation of computer hardware and
software. The complexity of modern hardware and
software systems often makes accurate timing
information difficult to obtain [1]. This note
describes an experimental comparison of timing
information provided by a large multiprogramming
operating system (OS/360 MVT) with timing information
derived directly from a high resolution hardware clock.
The hardware clock was found to be a superior source of
timing information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snyder:1974:QSA,
author = "Rowan Snyder",
title = "A quantitative study of the addition of extended core
storage",
journal = j-SIGMETRICS,
volume = "3",
number = "1",
pages = "10--33",
month = mar,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041619.1041621",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In evaluating computer systems it is necessary to
identify the prime determinants of system performance,
and to quantify a performance metric. The purpose of
this paper is to present a quantitative study of the
effects of a significant hardware reconfiguration on
some measures of system performance, and thereby
demonstrate the effectiveness of Kiviat graphs in
performance analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merrill:1974:TCA,
author = "H. E. Barry Merrill",
title = "A technique for comparative analysis of {Kiviat}
graphs",
journal = j-SIGMETRICS,
volume = "3",
number = "1",
pages = "34--39",
month = mar,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041619.1041622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The article in September, 1973 Performance Evaluation
Review demonstrated again the utility of the Kiviat
Graph as a visual display of system profiles. A simple
extension of the concept of the Kiviat Graph permits a
realistic (though not necessarily linear) comparison of
two Kiviat graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peterson:1974:CSH,
author = "Thomas G. Peterson",
title = "A comparison of software and hardware monitors",
journal = j-SIGMETRICS,
volume = "3",
number = "2",
pages = "2--5",
month = jun,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041687.1041688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tests were performed to compare the accuracy of two
computer system monitors. Specifically, results from a
hardware monitor were compared with results from a
software monitor. Some of the subreports produced by
the software monitor were quite accurate; other
subreports were not quite so accurate, but they were
consistent from run to run. In view of these test
results, it appears that the software monitor can be
used to measure the effects of changes made in a system
tuning project.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Syms:1974:BCT,
author = "Gordon H. Syms",
title = "Benchmarked comparison of terminal support systems for
{IBM 360} computers",
journal = j-SIGMETRICS,
volume = "3",
number = "2",
pages = "6--34",
month = jun,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041687.1041689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A set of terminal scripts and benchmarks were derived
for comparing the performance of time sharing and batch
computer operating systems. Some of the problems
encountered in designing valid benchmarks for comparing
computer operating systems under both terminal and
batch loads are discussed. The results of comparing
TSS/360, CP/67 and MTS time sharing systems for the IBM
360/67 over a wide range of load conditions are
presented. The results of comparing TSS, MTS and OS/MVT
under batch loads are also presented. The tests were
conducted with Simplex and Dual processor
configurations with 256K bytes to 768K bytes of main
memory. The conclusions were quite surprising in that
CP/67 running on a minimal system performed
competitively with TSS/360 on a much larger dual
processor system. With equal configurations CP/67 out
performed TSS/360 by a wide margin. Furthermore, MTS
providing both batch and terminal support produced
performance that was 5 percent to 25 percent better
than the split configuration with CP/67 providing the
terminal support and OS/MVT providing the batch
processing support. Serious performance degradation of
the time sharing computer systems from overloading was
experienced and a simple solution is suggested to
prevent such degradation. The degradation was so severe
as to render the performance less than that of a
sequential job processor system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Morris:1974:KGC,
author = "Michael F. Morris",
title = "{Kiviat} graphs: conventions and `figures of merit'",
journal = j-SIGMETRICS,
volume = "3",
number = "3",
pages = "2--8",
month = oct,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041691.1041692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Once in a very great while an idea comes along that
quickly captures many imaginations. The circular
graphic technique proposed nearly two years ago by Phil
Kiviat, our illustrious Chairman, and very
appropriately named `Kiviat Graphs' by our erst-while
(and sorely missed) `Software Empiricist,' Ken Kolence,
is one of these ideas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lujanac:1974:NSB,
author = "Paul L. Lujanac",
title = "A note on {Syms}' benchmarked comparison",
journal = j-SIGMETRICS,
volume = "3",
number = "3",
pages = "9--10",
month = oct,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041691.1041693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "If the load factor is expressed linearly as a fraction
of the capacity of a terminal-oriented system, we
assume that response times increase more or less
exponentially with an increase in load factor. Syms'
load factor is nonlinear, and, in fact, was designed to
`make the terminal response times approximately a
linear function of the load factors.'",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Graham:1974:MPB,
author = "G. Scott Graham and Peter J. Denning",
title = "Multiprogramming and program behavior",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "1--8",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809367",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic multiprogramming memory management strategies
are classified and compared using extant test data.
Conclusions about program behavior are then drawn.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwain:1974:MPV,
author = "A. Brandwain and J. Buzen and E. Gelenbe and D.
Potier",
title = "A model of performance for virtual memory systems",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "9--9",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809368",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models are well suited for analyzing
certain resource allocation problems associated with
operating system design. An example of such a problem
is the selection of the level of multiprogramming in
virtual memory systems. If the number of programs
actively competing for main memory is allowed to reach
too high a value, thrashing will occur and performance
will be seriously degraded. On the other hand,
performance may also suffer if the level of
multiprogramming drops too low since system resources
can become seriously under utilized in this case. Thus
it is important for virtual memory systems to maintain
optimal or near optimal levels of multiprogramming at
all times. This paper presents an analytic model of
computer system behavior which can be used to study
multiprogramming optimization in virtual memory
systems. The model, which explicitly represents the
numerous interactions which occur as the level of
multiprogramming varies, is used to numerically
generate performance curves for representative sets of
parameters. A simplified model consisting of a CPU and
a single backing store device is then used to derive an
approximate expression for the optimal level of
multiprogramming. The simplified model is also used to
examine the transient behavior of such systems. The
mathematical model we present is based on some
simplifying assumptions; in particular all programs
executing in the system are supposed to be
statistically identical. In this respect the model we
present must be considered to be a theoretical
explanation of a phenomenon (thrashing) observed in
certain operating systems rather than an exact
representation of reality. Certain assumptions of the
mathematical model are relaxed in a simulation model
where distribution functions of service times at the
secondary memory and input-output devices are
arbitrary; by comparison with the theoretical results
we see that CPU utilization and throughput are not very
sensitive to the specific forms of these distributions
and that the usual exponential assumptions yield quite
satisfactory results. The simulation model is also
programmed to contain overhead. Again we observe that
the mathematical model's predictions are in fair
agreement with the useful CPU utilization predicted by
the simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
xxnote = "Check: author may be Brandwajn??",
}
@Article{Henderson:1974:OCW,
author = "Greg Henderson and Juan Rodriguez-Rosell",
title = "The optimal choice of window sizes for working set
dispatching",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "10--33",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809369",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of varying window size in a working set
dispatcher to control working set size and number of
page faults is examined. A space-time cost equation is
developed and used to compare fixed window size to
variable window size for different types of secondary
storage based on the simulated execution of real
programs. A general approach is indicated for studying
the relative merit of the two dispatching algorithms
and their interaction with different hardware
configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Dispatching; Optimal control; Resource allocation;
Supervisory systems; Time-sharing systems; Working
set",
}
@Article{Denning:1974:CLP,
author = "Peter J. Denning",
title = "Comments on a linear paging model",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "34--48",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809370",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The linear approximation relating mean time between
page transfers between levels of memory, as reported by
Saltzer for Multics, is examined. It is tentatively
concluded that this approximation is untenable for main
memory, especially under working set policies; and that
the linearity of the data for the drum reflects the
behavior of the Multics scheduler for background jobs,
not the behavior of programs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brice:1974:FCR,
author = "Richard S. Brice and J. C. Browne",
title = "Feedback coupled resource allocation policies in the
multiprogramming-multiprocessor computer system",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "49--53",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809371",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents model studies of some integrated
feedback-driven scheduling systems for a
multiprogrammed computer system. This abstract can
present only the conclusions of the studies and little
of the supporting data and detail. The basic format of
the analysis is to fix a size for the local buffers and
a total size for the collection buffers, to define a
set of algorithms for the determination of the data
removal quanta to the local buffers, the allocation of
space in the collection buffers, and the look-ahead
mechanism for input and then to evaluate the relative
merits of the various strategies by the resulting CPU
efficiency. Three feedback algorithms are studied as
examples in this work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Halachmi:1974:CCT,
author = "Baruch Halachmi and W. R. Franta",
title = "A closed, cyclic, two-stage multiprogrammed system
model and its diffusion approximation solution",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "54--64",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809372",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper attention is focused on closed
multiprogrammed computer type systems. In particular,
two-stage closed queueing systems are considered. The
first stage can be associated with the CPU (Central
Processing Unit) and the other with the I/O
(Input-Output) operations. Diffusion approximation
solutions are presented for all the models
discussed. For the first model we consider the
{GI1/MS/N} system, which allows the service times of a
single CPU to obey any general probability
distribution, with finite variance, while the I/O
servers are taken to be exponential. The second model
is an extension of the first where the concept of
feedback is implemented in the CPU stage. This concept
plays an important role in computer environments where
the operating system includes the multiplexing or page
on demand property. The third model, the {MS1/MS2/N},
deals with multiprocessing computer systems where
possibly more than one CPU is available, but all
servers are assumed to be exponential. In the spirit of
the approximation to the GI/G/S open system, as a final
model, we construct the approximate solution to the
{GIS1/GIS2/N} closed system and discuss the
circumstances under which its use is advisable. Several
numerical examples for each of the models are given,
each accompanied by appropriate simulation results for
comparison. It is on the basis of these comparisons
that the quality of the suggested diffusion
approximations can be judged. The diffusion
approximating formulas should be regarded not only as a
numerical technique, but also as a simplifying
approach, by which deeper insight can be gained into
complicated queueing systems. Considerable work remains
to be done, using as a methodology the approach, given
here, and several possible extensions are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schwetman:1974:ATS,
author = "H. D. Schwetman",
title = "Analysis of a time-sharing subsystem (a preliminary
report)",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "65--75",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The MESA subsystem provides a wide variety of services
to remotely located users of the computing facilities
of the Purdue University Computing Center. This paper
presents the preliminary steps of an in-depth study
into the behavior of MESA. The study uses a software
data-gathering facility to analyze the usage and
queueing aspects of this behavior and to provide values
for parameters used by two models of the subsystem.
These models, a network-of-queues model and a
simulation model, are designed to project subsystem
behavior in different operating environments. The paper
includes a number of tables and figures which highlight
the results, so far, of the study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reiser:1974:ASC,
author = "M. Reiser and A. G. Konheim",
title = "The analysis of storage constraints by a queueing
network model with blocking",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "76--81",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809374",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The finite capacity of storage has a significant
effect on the performance of a contemporary computer
system. Yet it is difficult to formulate this problem
and analyze it by existing queueing network models. We
present an analysis of an open queueing model with two
servers in series in which the second server has finite
storage capacity. This network is an exponential
service system; the arrival of requests into the system
is modeled by a Poisson process (of rate $ \lambda $)
and service times in each stage are exponentially
distributed (with rates $ \alpha $ and $ \beta $
respectively). Requests are served in each stage
according to the order of their arrival. The principal
characteristic of the service in this network is
blocking; when $M$ requests are queued or in service in
the second stage, the server in the first stage is
blocked and ceases to offer service. Service resumes in
the first stage when the queue length in the second
stage falls to $ M - 1$. Neuts [1] has studied
two-stage blocking networks (without feedback) under
more general statistical hypotheses than ours. Our goal
is to provide an algorithmic solution which may be more
accessible to engineers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schatzoff:1974:SVT,
author = "M. Schatzoff and C. C. Tillman",
title = "Statistical validation of a trace-driven simulator",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "82--93",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common problem encountered in computer system
simulation is that of validating that the simulator can
produce, with a reasonable degree of accuracy, the same
information that can be obtained from the modelled
system. This is basically a statistical problem because
there are usually limitations with respect to the
number of controlled tests which can be carried out,
and assessment of the fidelity of the model is a
function of the signal to noise ratio. That is, the
magnitude of error which can be tolerated depends upon
the size of the effect to be predicted. In this paper,
we describe by example how techniques of statistical
design and analysis of experiments have been used to
validate the modeling of the dispatching algorithm of a
time sharing system. The examples are based on a
detailed, trace-driven simulator of CP-67. They show
that identical factorial experiments involving
parameters of this algorithm, when carried out on both
the simulator and on the actual system, produced
statistically comparable effects.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferrari:1974:GPS,
author = "Domenico Ferrari and Mark Liu",
title = "A general-purpose software measurement tool",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "94--105",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A software measurement tool designed for the users of
PRIME, an interactive system being developed, is
presented. The tool, called SMT, allows its user to
instrument a program, modify a pre-existing
instrumentation and specify how the collected data are
to be reduced by typing in a few simple commands. The
user can also write his own measurement routines, which
specify the actions to be taken at event detection
time, and submit them to the SMT; after checking their
correctness, the SMT deals with them as with its
built-in, standard measurement routines. The design
goals of a general-purpose tool like the SMT are
discussed, and the prototype version of the tool, which
has been implemented, is described from the two
distinct viewpoints of a user and of a measurement-tool
designer. An example of the application of the
prototype to a measurement problem is illustrated, the
reasons why not all of the design goals have been
achieved in the implementation of the prototype are
reviewed, and some of the foreseeable extensions of the
SMT are described.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Foley:1974:EDD,
author = "James D. Foley and John W. McInroy",
title = "An event-driven data collection and analysis facility
for a two-computer network",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "106--120",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe an event-driven data
collection facility, and a general-purpose program to
perform a set of analyses on the collected data. There
are several features which distinguish this facility
from others. First, the system being monitored is a
network of loosely-coupled computers. Although there
are just two computers in the network, the facility
could be readily extended to larger networks. Second,
the main purpose of the facility is to monitor the
execution of interactive graphics application programs
whose processing and data are distributed between the
network's computers. Third, the data collector and
analyzer are readily extendible to treat new kinds of
data. This is accomplished by a data and event
independent collector, and a table-driven data
analyzer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Batson:1974:MVM,
author = "A. P. Batson and R. E. Brundage",
title = "Measurements of the virtual memory demands of
{Algol-60} programs (Extended Abstract)",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "121--126",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809378",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Programming languages such as Algol-60 use block
structure to express the way in which the name space of
the current environment, in the contour model (1) sense
of that word, changes during program execution. This
dynamically-varying name space corresponds to the
virtual memory required by the process during its
execution on a computer system. The research to be
presented is an empirical study of the nature of the
memory demands made by a collection of Algol-60
programs during execution. The essential
characteristics of any such resource request are the
amount of memory requested, and the holding time for
which the resource is retained and these distributions
will be presented for several components of the virtual
memory required by the Algol programs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sebastian:1974:HHE,
author = "Peter R. Sebastian",
title = "{HEMI} ({Hybrid Events Monitoring Instrument})",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "127--139",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "HEMI is an experimental instrumentation system being
developed for use with the CYBER 70 and 170 Series
computers in order to ascertain the extent to which an
integrated approach to instrumentation is economically
and technologically viable for performance measurement
and evaluation purposes. HEMI takes advantage of the
distributed CYBER computer architecture. This consists
of a pool of Peripheral Processors (PPs) --- (mainly
dedicated to I/O and system tasks) while the CPU
capabilities are reserved mostly for computation;
Central Memory constitutes the communications link.
HEMI uses one of the PPs as its major processor. A
hardware data acquisition front end is interfaced to
one of the I/O channels and driven by the PP. Hardware
probes sample events at suitable testpoints, while the
PP has software access to Central Memory (Operating
System tables and parameters), Status Registers, I/O
Channel Flags, etc. A data reduction package is used to
produce a variety of reports from the data collected. A
limited on-line data reduction and display capability
is also provided. This paper will describe the current
status of the project as well as anticipated
applications of HEMI.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cox:1974:IAC,
author = "Springer W. Cox",
title = "Interpretive analysis of computer system performance",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "140--155",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A typical performance evaluation consists of the
identification of resources, the definition of system
boundaries, the measurement of external and internal
performance variables, and finally the interpretation
of data and projection of system performance to
hypothetical environments. These projections may be
used to estimate the cost savings to be expected when
changes are made to the system. The fundamental
external performance measures such as response time and
thruput are intimately related, but may be defined
differently depending on how the system is defined.
They can be analyzed with respect to the internal
performance measures (such as activities, queue lengths
and busy times) by applying one or more interpretations
such as: absolute utilizations, normalized busy times,
system profiles, analysis of response, workload
relaxation, and resource consumption hyperplanes. These
models, which are generally free of assumptions
regarding interarrival and service time distributions,
can be adjusted to represent potential changes to the
system. Then the interpretations may be used to
evaluate the predicted external performance measures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Noe:1974:DYC,
author = "J. D. Noe and N. W. Runstein",
title = "Develop your computer performance pattern",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "156--165",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Is the load on your computer shifting? Did that change
to faster access disks really help? Would more core
memory increase throughput appreciably, or would it be
necessary to also increase central processor power?
These are three quite different kinds of questions; one
concerns detecting a long-term trend, another assessing
the effects of a system change, and a third estimating
effects of the decision to alter the configuration. Yet
all of these require knowledge of current and past
system performance, the type of knowledge that must be
the result of long-term performance monitoring. This is
not simple enough to be picked up overnight or in one
series of experiments, nor can it be assessed by
watching one or two parameters over a long period. One
must have a thorough understanding of the pattern of
performance by knowing the mean values of a number of
measures and knowing something about the variations
from these means. This paper hardly needs to recommend
that computer managers establish an understanding of
performance pattern; they already are very conscious of
the need. What it does is recount development of a
method of doing so for the CDC 6400 at the University
of Washington and of the selection of ``Kiviat Graphs''
as a means to present data in a synoptic form. The
remainder of this paper will give a brief account of
the authors' experience in designing a measurement
system for the CDC 6400 at the University of Washington
Computer Center. This will include comments on the
approach to deciding what to measure and display for
the synoptic view of the system, as well as how to
provide more detailed data for backup. Examples of the
use of Kiviat Graphs [4] to show the effects of load
shift and of a system configuration change are
included, and the effect of a change of operating
system will be noted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brotherton:1974:CCC,
author = "D. E. Brotherton",
title = "The computer capacity curve --- a prerequisite for
computer performance evaluation and improvement",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "166--179",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809382",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurements of themselves have tended to concentrate
on specific computer configuration components (e.g.,
CPU load, channel load, disk data set contention,
problem program optimization, operating system
optimization, etc.) rather than at the total computer
configuration level. As a consequence, since these
components can have a high degree of interaction, the
requirement currently exists for a workable
configuration performance concept which will reflect
the configuration performance change that is the
resultant of single or multiple component change. It is
the author's opinion that such a concept will provide
management and measurement specialists a planning and
analysis tool that can be well used in evaluating the
costs. It is to this configuration performance concept
that this paper is addressed, and the concept by my
choosing is named ``The Computer Capacity Curve.''",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Erikson:1974:VCU,
author = "Warren J. Erikson",
title = "The value of {CPU} utilization as a criterion for
computer system usage",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "180--187",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is generally agreed that a computer system's CPU
utilization means little by itself, but there has been
only a limited amount of research to determine the
value of CPU utilization when used with other
performance measures. This paper focuses on
time-sharing systems (or similar systems such as some
remote batch systems) as viewed by someone who wants to
minimize the mean cost per job run on the system. The
paper considers cost per job to include both the
computer cost (as allocated among all the jobs run on
the system) and the user cost (where user cost is the
time spent waiting for a response from the system
multiplied by the user's wage rate). Given this
approach, cost per job is a function of some constants
(user wage rate, computer system cost, and mean
processing time per job) and only one variable (CPU
utilization). The model thus developed can be used to
determine the optimum CPU utilization for any system.
It can also be used to determine the value of different
tuning efforts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Badel:1974:AOP,
author = "M. Badel and E. Gelenbe and J. Leroudier and D. Potier
and J. Lenfant",
title = "Adaptive optimization of the performance of a virtual
memory computer",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "188--188",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007773.809384",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is known that the regulation of the degree of
multiprogramming is perhaps one of the most important
factors determining the overall performance of a
virtual memory computer. In this paper we present an
approach which differs somewhat from the approaches
usually taken to regulate the degree of
multiprogramming, which are mainly derived from the
working-set principles. We design a controller which
will regulate the system in order to optimize a given
performance measure. The controller is applied to a
system where the critical resource is primary memory,
and we are only concerned with systems where
ineffective regulation leads to the phenomenon known as
thrashing due to extensive paging activity. In the
first section, the dynamics of the system we wish to
regulate are investigated using an analytical model.
The system consists of a set of terminals and of a
resource loop (CPU, secondary memory device, file disk)
shared by the users. Using classical assumptions about
program behavior (e.g., life-time function), the
throughput of the RL is obtained as a function of the
degree of multiprogramming $n$ (number of users sharing
the resources at a given instant of time) and of the
system parameters. This result provides a greater
insight of the ``plant'' we wish to control. The
mathematical results are validated and extended with
data from simulation experiments using a more detailed
model (overheads and non-exponential assumption). In
the next section, a criterion called ``dilatation''
based on the utilization of the different resources is
defined. From the analytical and simulation results of
the first section, it can be shown that there exists a
value $ n_0 $ of the degree of multiprogramming which
maximizes this criterion. The regulation of $n$ to
$ n_0 $ is achieved by controlling the access of the
users to the RL. The value of $ n_0 $ is estimated in
real-time
through a continuous estimation of the two first
moments of the criterion. Using these estimations, the
decision of introducing or not a new user in the RL is
taken whenever a user leaves a terminal or departs from
the RL. Extensive simulation experiments were
conducted, where the implementation of the different
functions of the controller have been thoroughly
simulated. They have shown that the control scheme
leads to an improvement of the system performance in
mean response time and resource utilization, and,
overall, adapts in real-time the degree of
multiprogramming to the characteristics of the users
(the adaptation is performed in 4 sec. or so for a unit
variation of the optimal degree of multiprogramming). A
discussion of practical application of results ends the
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kimbleton:1974:BCS,
author = "Stephen R. Kimbleton",
title = "Batch computer scheduling: a heuristically motivated
approach",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "189--198",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809385",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Efficient scheduling of jobs for computer systems is a
problem of continuing concern. The applicability of
scheduling methodology described in the operations
research literature is severely restricted by the
dimensionality of job characteristics, the number of
distinct resource types comprising a computer system,
the non-deterministic nature of the system due to both
interprocess interaction and contention, and the
existence of a multitude of constraints effecting job
initiation times, job completion times, and job
interactions. In view of the large number of issues
which must be considered in job scheduling, a heuristic
approach seems appropriate. This paper describes an
initial implementation of such an approach based upon a
fast, analytically driven, performance prediction
tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharp:1974:APD,
author = "Joseph C. Sharp and James N. Roberts",
title = "An adaptive policy driven scheduler",
journal = j-SIGMETRICS,
volume = "3",
number = "4",
pages = "199--208",
month = dec,
year = "1974",
CODEN = "????",
DOI = "https://doi.org/10.1145/800277.809386",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:50:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The theory of policy driven schedulers (Ref. [1]) is
extended to cover cases in which the scheduling
parameters are allowed to adapt dynamically as the
system's job load varies. The system under
consideration offers batch, time sharing and limited
real time services. Data from simulated and live loads
are presented to evaluate both the static and the
adaptive schedulers. A policy driven scheduler makes
its decisions with respect to a set of policy
functions, fi(t). Each of the policy functions
corresponds to a different type of user and specifies
the amount of computing resources that the system will
try to give a user in that group within a given total
amount of elapsed time. It is found that the policy
functions must be set conservatively in order to avoid
response problems during periods of heavy load, but
that during more lightly loaded periods the
conservative settings result in widely disparate rates
of service to similar jobs. One solution is to vary the
policy functions as the job load changes. A dynamic
algorithm is presented that maintains responsiveness
during heavy loads and provides fairly uniform service
rates at other times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merrill:1975:FCC,
author = "H. W. Barry Merrill",
title = "Further comments on comparative evaluation of {Kiviat}
graphs",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "1--10",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mike Morris has presented an excellent discussion in
these pages (1) of the use of Kiviat Graphs for
Computer Performance Evaluation, referencing another
fine article (2) which proposed a technique for
analytic comparisons (rankings) of these Graphs. Morris
also proposes that these techniques may be very useful
in describing system performance, and suggests a
different method for calculation of `Figures of Merit'
of Kiviat Graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stevens:1975:NFM,
author = "Barry A. Stevens",
title = "A note on figure of merit",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "11--19",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Since Merrill proposed a Figure of Merit (FOM) for use
in interpretation of the Kiviat Graph (KG), the FOM has
found its way into at least one computer program to
plot those graphs, and has been the subject of further
discussion and amplification and has had alternate
computation methods proposed and rebutted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bell:1975:MCP,
author = "Thomas E. Bell",
title = "Managing computer performance with control limits",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "21--28",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dr. Bell received his doctorate in Operations
Management from the University of California at Los
Angeles in 1968. He immediately joined the Rand
Corporation as a Member of the Technical Staff in its
Computer Science Department and undertook research in
the simulation and performance improvement of computing
systems. During this research he participated in the
definition of the Extendable Computer System Simulator,
the development of a methodology for computer
performance improvement, and analysis of large,
multi-machine computer installations. He also analyzed
requirements for future command-and-control systems and
for logistic systems, in order to determine required
system functions and hardware size. He left Rand in
early 1974 to join the Software Research and Technology
Staff of TRW Systems Group where he is currently
developing improved techniques to specify the
requirements of computer software systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Browne:1975:AMP,
author = "J. C. Browne",
title = "An analysis of measurement procedures for computer
systems",
journal = j-SIGMETRICS,
volume = "4",
number = "1",
pages = "29--32",
month = jan,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041695.1041699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper purports to be a partial record of the
remarks made by the author at a panel session sponsored
by SIGMETRICS at the 1974 ACM National Conference in
San Diego. All of the material covered in the talk is
not included here primarily because it appears in other
contexts or in the presentations of other speakers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Terplan:1975:COR,
author = "Kornel Terplan",
title = "Cost-optimal reliability of data processing systems",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "1--12",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the advent of third generation computing systems,
the increase in complexity and power has reached a
degree which exceeds the human ability to understand,
to analyze, to predict, and to optimize system
performance and reliability. The only method that can
help is measurement. In defining measurement purposes,
one has to define which measurable quantities in the
system are significant and which may be ignored. But,
at the present time, we do not know in general what is
relevant in the measurements. For the sake of clarity,
it is useful to define several levels of measurement:
organizational level --- computer center level ---
computing system level --- job level --- computer
subsystem level.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Landwehr:1975:USM,
author = "Carl E. Landwehr",
title = "Usage statistics for {MTS}",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "13--23",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The following report is presented in response to
Professor Browne's request for case studies of
performance measurement projects; this study takes a
macroscopic view of a large-scale time sharing and
batch processing installation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reddy:1975:EEM,
author = "Y. V. Reddy",
title = "Experimental evaluation of a multiprogrammed computer
system",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "24--32",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041704",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on the design and analysis of a
statistical experiment conducted on a `live' job stream
to determine the effect of segment size used for
storage allocation on the system performance.
Performance measures selected are turnaround time,
total cost and CPU utilization. The experiment consists
of one factor, the segment size, at five levels.
Uncontrolled factors such as EXCP's (number of I/O
starts) and core usage are included as covariates in
the analysis of variance. This experiment is part of a
continuing activity of Measurement, Evaluation and
Simulation. It is designed to provide data for
improving performance incrementally. The results of the
experiment provided an optimal segment size for the
given classing/scheduling algorithm and core-layout.
Design objectives and details of the analysis are also
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhandarkar:1975:PAM,
author = "Dileep P. Bhandarkar",
title = "A practical application of memory interference
models",
journal = j-SIGMETRICS,
volume = "4",
number = "2",
pages = "33--39",
month = apr,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041701.1041705",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper briefly describes an approximate Markov
chain model for memory interference in a multiprocessor
system like C.mmp. The modeling assumptions explain the
level of abstraction at which the analysis is carried
out. Some empirical measurements are presented to
determine the model parameters for C.mmp. The analytic
results obtained from the model are compared with some
measured and simulation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bahr:1975:NFM,
author = "Dieter Bahr",
title = "A note on figures of merit",
journal = j-SIGMETRICS,
volume = "4",
number = "3",
pages = "1--3",
month = jul,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041707.1041708",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are different ways to compute figures of merit
(FOM). You may use Morris' [1] or Merrill's method [2]
or create any new one. But, in my opinion, that does
not answer the question whether these numbers are
nonsense or not.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boehm:1975:ICP,
author = "B. W. Boehm and T. E. Bell",
title = "Issues in computer performance evaluation: some
consensus, some divergence",
journal = j-SIGMETRICS,
volume = "4",
number = "3",
pages = "4--39",
month = jul,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041707.1041709",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper summarizes the results of an ACM/NBS
Workshop on Computer Performance Evaluation. Computer
Performance Evaluation (CPE) was selected as the
subject of an ACM/NBS Workshop because of the
significant leverage CPE activities can have on
computer usage. This paper describes a number of
conclusions abstracted from the discussions as well as
presenting recommendations formally adopted by the
participants. While several of these conclusions
indicate that improvements are needed in performance
analysis tools, another suggests that improved
application of CPE could be achieved by better
documentation of analysis approaches. More integration
of data collection and modeling are considered
necessary for the performance analysis field to develop
its full potential. Participants noted that the common
emphasis on data collection or modeling, to the
exclusion of considering objectives, often seriously
degrades the value of performance analyses; the only
savings that really count from a performance analysis
are the ones that appear on the bottom line of the
balance sheet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barber:1975:BC,
author = "Eric Ole Barber and Arne Asphjell and Arve Dispen",
title = "Benchmark construction",
journal = j-SIGMETRICS,
volume = "4",
number = "4",
pages = "3--14",
month = oct,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041711.1041712",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A partially automated method of generating benchmarks
for comparison of EXEC 8 with other systems has been
developed as one step in preparation for choosing a new
computer at the University of Trondheim.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marrevee:1975:MPP,
author = "J. P. Marrev{\'e}e",
title = "Measurements of the {Philips P1400} multiprogramming
system",
journal = j-SIGMETRICS,
volume = "4",
number = "4",
pages = "15--45",
month = oct,
year = "1975",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041711.1041713",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of performance measurements have been made on
a Philips P1000 computer under its Multiprogramming
System (MPS) in a business applications environment.
All measurements were collected by software monitoring
programs which were developed with the following
objectives in mind: general applicability; minimum
overhead; and, as much as possible, independence of
Monitor releases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "NOTE(review): the title says ``Philips P1400'' but the
abstract says ``Philips P1000''; one of the two model
numbers is presumably a transcription error --- verify
against the original paper.",
}
@Article{Wright:1976:AET,
author = "Linda S. Wright and William A. Burnette",
title = "An approach to evaluating time sharing systems:
{MH-TSS} a case study",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "8--28",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The authors conducted a benchmark measurement of the
Murray Hill Time Sharing System (MH-TSS) running on a
Honeywell 6000. The object of the test was to duplicate
the load normally present on the Murray Hill production
system, and measure the system's behavior before and
after a major software release and a major hardware
improvement. Five different load levels, from 30 to 90
users, were measured for each configuration. This paper
discusses the methods used in the design of the
experiment and in the analysis and interpretation of
the results. Several measurement tools were used in
this test. The event trace collection facility of
MH-TSS was used for the benchmark measurement and for
the design and fine tuning of a script representing the
normal load at Murray Hill. A commercially available
H6000-specific terminal simulator was used to feed
these scripts to the system. The batch background
system was loaded by a stream of synthetic jobs,
matched in resource usage characteristics to a set of
jobs chosen at random from the job stream of the
production system. The event trace data gathered at
various load levels under the three software and
hardware configurations were analyzed using two
techniques employing a state transition representation
of program behavior and system response. The result was
a set of data which documents the expected performance
improvements for the new software and hardware being
installed at Murray Hill, and which suggests the
expected growth potential for MH-TSS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "event trace; monitoring; operating systems; queuing
networks; response time; state transition models",
}
@Article{Calcagni:1976:SRK,
author = "John M. Calcagni",
title = "Shape in ranking {Kiviat} graphs",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "35--37",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to address the topic of
ranking or comparing Kiviat Graphs. Several articles
have appeared on the subject. For background
information the reader is directed to the original
article by Philip Kiviat and Kenneth Kolence (1) and to
the articles on ranking by Barry Merrill (2, 4) and
Michael Morris. The main emphasis here will be on
showing how automatic inclusion of axis-value
normalizations and hence of pattern normalization can
be achieved. It is hoped that this will be one way of
making the ranking of Kiviat Graphs more meaningful and
hence more useful. Pattern recognition is, after all,
one of the main reasons for using the Kiviat Graph
technique.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eisenfeld:1976:IRH,
author = "J. Eisenfeld and David R. Barker and David J.
Mishelvich",
title = "Iconic representation of the human face with computer
graphics",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "38--39",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are many applications for the iconic
representation of the human face. The program discussed
here was designed to describe the face by means of
measurements made on a skeletal radiograph and, in
particular, could be used to indicate changes resulting
from oral surgery. The computer generated faces are
drawn using a program modified by the authors which was
produced and kindly given to us by Mr Robert Jacob and
Dr William H. Huggins of the Johns Hopkins University.
Their program was based on that developed by Dr Herman
Chernoff (1) of Stanford University. The program was
originally designed for the presentation of
multivariate statistical data and was modified by Jacob
and Huggins for use in iconic communication. As a
result of our modifications, the mouth, nose, and
facial outline are presented more realistically, the
data input is interactive and quicker, especially when
only a few input variables are more directly related to
facial components to facilitate accuracy in drawing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "NOTE(review): the third author's surname is given as
``Mishelvich'' here; the spelling ``Mishelevich'' is
more common in other bibliographies --- verify against
the original paper before correcting.",
}
@Article{Nutt:1976:TCS,
author = "Gary J. Nutt",
title = "Tutorial: computer system monitors",
journal = j-SIGMETRICS,
volume = "5",
number = "1",
pages = "41--51",
month = jan,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The most important questions to be answered before
attempting to monitor a machine are {\em what\/} to
measure and {\em why\/} the measurement should be
taken. There is no general answer to these questions,
although a comprehensive set of considerations has been
discussed elsewhere. The following example indicates
some of the considerations involved. Suppose one is
interested in tuning a medium scale system which
utilizes virtual memory to support a batch
multiprogramming strategy. The nature of the job load
is a major factor in determining system performance;
the mix may be monopolized by I/O-bound jobs which use
very little processor time. In this case, the
bottleneck might be the mass storage system or the
peripheral devices. Resource utilization of the
peripheral devices may indicate bottlenecks at that
point; high mass storage utilization may not be
attributable only to the I/O operations, but may be
significantly influenced by the virtual memory
replacement policy. Processor utilization in this
system is also an insufficient measure for most
purposes, since the overhead time for spooling,
multiprogramming, and virtual memory may be unknown. A
more useful measurement for operating system policy
studies would quantify processor utilization for the
user as well as for each function of interest in the
operating system. From this example, one can see that
the variety of evaluation objectives and computer
systems causes the determination of what and why to be
largely a heuristic problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cotton:1976:SFP,
author = "Ira W. Cotton",
title = "Some fundamentals of price theory for computer
services",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "1--12",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The authors conducted a benchmark measurement of the
Murray Hill Time Sharing System (MH-TSS) running on a
Honeywell 6000. The object of the test was to duplicate
the load normally present on the Murray Hill production
system, and measure the system's behavior before and
after a major software release and a major hardware
improvement. Five different load levels, from 30 to 90
users, were measured for each configuration. This paper
discusses the methods used in the design of the
experiment and in the analysis and interpretation of
the results. Several measurement tools were used in
this test. The event trace collection facility of
MH-TSS was used for the benchmark measurement and for
the design and fine tuning of a scrint representing the
normal load at Murray Hill. A commercially available
H6000-specific terminal simulator was used to feed
these scripts to the system. The batch background
system was loaded by a stream of synthetic jobs,
matched in resource usage characteristics to a set of
jobs chosen at random from the job stream of the
production system. The event trace data gathered at
various load levels under the three software and
hardware configurations were analyzed using two
techniques employing a state transition representation
of program behavior and system response. The result was
a set of data which documents the expected performance
improvements for the new software and hardware being
installed at Murray Hill, and which suggests the
expected growth potential for MH-TSS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "event trace; monitoring; operating systems; queuing
networks; response time; state transition models",
remark = "NOTE(review): the DOI here duplicates that of entry
Wright:1976:AET, and the abstract and keywords are
byte-identical to that entry's; they almost certainly
belong to the Wright paper, not to this one. Sibling
entries in the same issue (Morris:1976:PIP,
Luderer:1976:CPM) use the 10.1145/1041739.* DOI
series --- verify the correct DOI and abstract against
the ACM Digital Library.",
}
@Article{Giammo:1976:DCP,
author = "Thomas Giammo",
title = "Deficiencies in computer pricing structure theory",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "13--21",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to address the topic of
ranking or comparing Kiviat Graphs. Several articles
have appeared on the subject. For background
information the reader is directed to the original
article by Philip Kiviat and Kenneth Kolence (1) and to
the articles on ranking by Barry Merrill (2, 4) and
Michael Morris. The main emphasis here will be on
showing how automatic inclusion of axis-value
normalizations and hence of pattern normalization can
be achieved. It is hoped that this will be one way of
making the ranking of Kiviat Graphs more meaningful and
hence more useful. Pattern recognition is, after all,
one of the main reasons for using the Kiviat Graph
technique.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "NOTE(review): the DOI here duplicates that of entry
Calcagni:1976:SRK, and the abstract is byte-identical
to that entry's; both almost certainly belong to the
Calcagni paper, not to this one. Sibling entries in the
same issue use the 10.1145/1041739.* DOI series ---
verify the correct DOI and abstract against the ACM
Digital Library.",
}
@Article{Kimbleton:1976:CPD,
author = "Stephen R. Kimbleton",
title = "Considerations in pricing distributed computing",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "22--30",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are many applications for the iconic
representation of the human face. The program discussed
here was designed to describe the face by means of
measurements made on a skeletal radiograph and, in
particular, could be used to indicate changes resulting
from oral surgery. The computer generated faces are
drawn using a program modified by the authors which was
produced and kindly given to us by Mr Robert Jacob and
Dr William H. Huggins of the Johns Hopkins University.
Their program was based on that developed by Dr Herman
Chernoff (1) of Stanford University. The program was
originally designed for the presentation of
multivariate statistical data and was modified by Jacob
and Huggins for use in iconic communication. As a
result of our modifications, the mouth, nose, and
facial outline are presented more realistically, the
data input is interactive and quicker, especially when
only a few input variables are more directly related to
facial components to facilitate accuracy in drawing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "NOTE(review): the DOI here duplicates that of entry
Eisenfeld:1976:IRH, and the abstract is byte-identical
to that entry's; both almost certainly belong to the
Eisenfeld paper, not to this one. Sibling entries in
the same issue use the 10.1145/1041739.* DOI series ---
verify the correct DOI and abstract against the ACM
Digital Library.",
}
@Article{Kiviat:1976:BRG,
author = "Philip J. Kiviat",
title = "A brief review of the {GAO} task group's
recommendations on management guidelines for pricing
computer services in the federal government",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "71--83",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041715.1041719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The most important questions to be answered before
attempting to monitor a machine are {\em what\/} to
measure and {\em why\/} the measurement should be
taken. There is no general answer to these questions,
although a comprehensive set of considerations has been
discussed elsewhere. The following example indicates
some of the considerations involved. Suppose one is
interested in tuning a medium scale system which
utilizes virtual memory to support a batch
multiprogramming strategy. The nature of the job load
is a major factor in determining system performance;
the mix may be monopolized by I/O-bound jobs which use
very little processor time. In this case, the
bottleneck might be the mass storage system or the
peripheral devices. Resource utilization of the
peripheral devices may indicate bottlenecks at that
point; high mass storage utilization may not be
attributable only to the I/O operations, but may be
significantly influenced by the virtual memory
replacement policy. Processor utilization in this
system is also an insufficient measure for most
purposes, since the overhead time for spooling,
multiprogramming, and virtual memory may be unknown. A
more useful measurement for operating system policy
studies would quantify processor utilization for the
user as well as for each function of interest in the
operating system. From this example, one can see that
the variety of evaluation objectives and computer
systems causes the determination of what and why to be
largely a heuristic problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "NOTE(review): the DOI here duplicates that of entry
Nutt:1976:TCS, and the abstract is byte-identical to
that entry's; both almost certainly belong to the Nutt
paper, not to this one. Sibling entries in the same
issue use the 10.1145/1041739.* DOI series --- verify
the correct DOI and abstract against the ACM Digital
Library.",
}
@Article{Morris:1976:PIP,
author = "Michael F. Morris",
title = "Problems in implementing and processing computer
charging schemes",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "84--88",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041744",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is important to point out at the beginning of this
presentation that we have strayed quite far from the
titled topic of our workshop --- `Pricing Computer
Services.' This makes my task much easier because I'm
not at all sure what `service' we get from computers
and `pricing' is seldom related in any economic sense
with the cost of production. Here we have really been
discussing `Charging for Computer Resource Usage.' I
will stay with the topic as we've been discussing it
rather than with the topic as I thought it should be.
To make to distinction clear between pricing services
and charging for resource usage I will relate a very
simple story from a recent newspaper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luderer:1976:CPM,
author = "Gottfried W. R. Luderer",
title = "Charging problems in mixed time-sharing\slash batch
systems: cross subsidization and invariant work units",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "89--93",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041745",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses two topics related to charging
for computing services in mixed timesharing/batch
systems. The first one is the problem of cross
subsidization between time-sharing and batch service. A
method is proposed which helps to avoid this
phenomenon. The second topic deals with the question of
helping the user to divide his work between
time-sharing and batch service based on charging
information. Basically, the approach is to define a
service-invariant computing work unit, which is priced
differently according to grade of service. Time-sharing
and batch are considered to be different grades of
service. The cost impact of moving work between
services can thus be more easily estimated. A method
for calculating grade-of-service factors from cost and
workload estimates is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Oatey:1976:STM,
author = "David J. Oatey",
title = "{SIGMETRICS} technical meeting on pricing computer
services",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "94--102",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041746",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This presentation will show how one large installation
actually does pricing of several on-line systems. This
is a `pricing in practice' example with the resultant
procedures, measures, and pricing determined by the
blending of several practical, political, and
theoretical influences.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gutsche:1976:UE,
author = "Richard H. Gutsche",
title = "User experience",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "103--107",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041747",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Security Pacific is the tenth largest bank in the
United States, operating 500 banking locations in the
State of California. Our Electronic Data Processing
Department serves the entire system from its Glendale
Operations Center and a satellite center in Hayward.
The Hayward location serves as an input/output center
for our Northern California banking offices. Data
Transmission provides for centralization of all
accounting functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anonymous:1976:PC,
author = "Anonymous",
title = "Participant's choice",
journal = j-SIGMETRICS,
volume = "5",
number = "1c",
pages = "108--122",
month = mar,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041739.1041748",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:47 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During these two sessions, chaired by Richard Gutsche
of Security Pacific National Bank, a panel of experts
addressed specific pricing problems the participants
and attendees felt were important. The preliminary
questions that the panelists addressed included: $
\bullet $ What should be included in an overhead charge
and why? $ \bullet $ Should a computer center be
price-competitive with an outside market? $ \bullet $
Funding a computer center --- real or funny money? $
\bullet $ What is an appropriate charging philosophy
for a paging environment?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luderer:1976:DCR,
author = "Gottfried W. R. Luderer",
title = "Defining a computer resource unit",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "5--10",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041722",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method for the construction of a resource component
charging formula for computer service in a
multiprogramming system is defined. Charges are
proportional to relative resource costs, to fractional
resource use with regard to total expected resource
usage, and the intent is to recover cost without profit
or loss. Further, a method is presented that simplifies
the treatment of overhead or unallocatable resource
costs. An aggregate `Computer Resource Unit' is
defined, which attempts to characterize workload in a
system-invariant way. Experiences with this concept and
its limitations are discussed. Recommendations for
those planning to introduce a similar concept are
given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer charging; overhead allocation; virtual time;
workload characterization",
}
@Article{Roehr:1976:PIT,
author = "K. Roehr and K. Niebel",
title = "Proposal for instruction time objectives",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "11--18",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041723",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The designer of an instruction processing unit is
generally faced with the problem to implement a machine
able to execute a given instruction set within given
timing and cost constraints. A very common method to
state instruction timing constraints is by means of an
average instruction time",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Collins:1976:PIC,
author = "John P. Collins",
title = "Performance improvement of the {CP-V} loader through
use of the {ADAM} hardware monitor",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "63--67",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041724",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ADAM hardware monitor can be used to localize and
identify several types of performance-impairing
behavior in user programs. This paper presents a case
study for such an improvement carried out on the CP-V
overlay loader. Through measurement of the execution
behavior and the subsequent analysis of the resulting
data, problems of three basic types were identified: 1.
The presence of inefficiently coded routines in areas
of high execution intensity; 2. The use of overly
general routines along heavily-used program paths; and
3. The use of inefficient algorithms for processing the
large amounts of data with which the loader deals. The
subsequent redesign and recoding of the problem areas
have resulted in a significant performance improvement:
the time required to load a program has been reduced by
a factor of between two and ten, dependent upon the
nature of the program and the loader options
specified.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwajn:1976:SLI,
author = "A. Brandwajn",
title = "Simulation of the load of an interactive system",
journal = j-SIGMETRICS,
volume = "5",
number = "2",
pages = "69--92",
month = apr,
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041721.1041725",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe a simulator of interactive users designed
for the resource sharing system ESOPE. We stress the
guide-lines of the design as well as the problems of
interface with the operating system, of measurements,
and of perturbations caused by the simulator in the
statistics gathered. We show two examples of an
application of the simulator to the design of a
resource-sharing system, viz., to an analysis of load
regulation policies, and to an evaluation of the
improvement in system performance one may expect from
implementing shared translators. Finally, we use the
load simulator to validate a mathematical model. The
latter is developed by step-wise refinement, using
measured values of model parameters, till a good
agreement between the performance indices computed from
our model and those measured in a real system under
simulated load, is obtained. It is observed that, for
most of the performance measures considered, a simple
model matches fairly well the `real world'.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coppens:1976:QER,
author = "G. W. J. Coppens and M. P. F. M. van Dongen and J. P.
C. Kleijnen",
title = "Quantile estimation in regenerative simulation: a case
study",
journal = j-SIGMETRICS,
volume = "5",
number = "3",
pages = "5--15",
month = "Summer",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041727.1041728",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model key-punching in a computer center as a
queuing simulation with 2 servers (typists) and 3
priority classes (small, medium, large jobs). The 90\%
quantile of queuing time is estimated for different
borderlines between the 3 job classes. Confidence
intervals for the quantiles are based on the
regenerative properties of the simulation, as derived
by Iglehart (1974). They utilize the asymptotic
normality of the estimated quantile, and a rather
complicated expression for its variance. Numerical
results are given for the quantiles (and averages) of
the queuing times in each job class, for several
borderlines between the 3 job classes. The effects of
simulation runlength on the confidence intervals were
also examined. The effects of varying job-class
borderlines were tentatively modeled by a regression
model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Estell:1976:HFRa,
author = "Robert G. Estell",
title = "How fast is `real-time'?",
journal = j-SIGMETRICS,
volume = "5",
number = "3",
pages = "16--18",
month = "Summer",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041727.1041729",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A single bench mark test was compiled and run on the
AN/UYK-7 computer, and on a number of commercial
computers, in order to measure the relative throughput
of the UYK-7, which is the Navy's large scale real-time
computer. The results indicate the speeds and
accuracies of each host; however, general conclusions
can be drawn only with some risk.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mills:1976:SMC,
author = "Philip M. Mills",
title = "A simple model for cost considerations in a batch
multiprocessor environment",
journal = j-SIGMETRICS,
volume = "5",
number = "3",
pages = "19--27",
month = "Summer",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041727.1041730",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:51:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a simple model which provides a
procedure for estimating the effect of additional
hardware on run time. The additional hardware may be
additional processors, more powerful processors, an
increase in memory size or additional memory modules.
Run time is related to cost effectiveness. A measure of
memory interference in the form of effective processing
power is determined for multiprocessors and used in the
formulation of run time. The overall procedure allows
the user to compare different multiprocessor hardware
configurations on a cost effective basis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buchanan:1976:IBM,
author = "Irene Buchanan and David A. Duce",
title = "An interactive benchmark for a multi-user minicomputer
system",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "5--17",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041733",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The work that forms the basis for this paper was
undertaken as part of an exercise to purchase two
multi-user minicomputer systems to be developed as
interactive facilities for grant holders supported by
the Engineering Board of the United Kingdom Science
Research Council.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Estell:1976:HFRb,
author = "Robert G. Estell",
title = "How fast is `real-time'?",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "18--20",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041734",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A single bench mark test was compiled and run on the
AN/UYK-7 computer, and on a number of commercial
computers, in order to measure the relative throughput
of the UYK-7, which is the Navy's large scale real-time
computer. The results indicate the speeds and
accuracies of each host; however, general conclusions
can be drawn only with some risk.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rafii:1976:SPR,
author = "Abbas Rafii",
title = "Study of the performance of {RPS}",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "21--38",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041735",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The objective of this study is to evaluate the impact
of RPS (Rotational Position Sensing) on the response
time and utilization of multiple spindle disk drives
with a shared channel. Simulation models are used to
compare the effectiveness of the RPS scheme with the
systems without RPS capability. Analytical models for
the number of RPS rotation misses and the utilization
of the channel at the saturation point are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Price:1976:CQN,
author = "Thomas G. Price",
title = "A comparison of queuing network models and
measurements of a multiprogrammed computer system",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "39--62",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041736",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although there has been a substantial amount of work
on analytical models of computer systems, there has
been little experimental validation of the models. This
paper investigates the accuracy of the models by
comparing the results calculated using analytical
models with measurements of an actual system. Models
with and without overlapped seeks are compared. Also,
we show how a model can be used to help interpret
measurements of a real system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical models; performance measurement and
evaluation; queuing networks",
}
@Article{Buzen:1976:TTT,
author = "J. P. Buzen",
title = "Tuning: tools and techniques",
journal = j-SIGMETRICS,
volume = "5",
number = "4",
pages = "63--81",
month = "Fall",
year = "1976",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041732.1041737",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:04 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tuning is basically a two stage process: the first
stage consists of detecting performance problems within
a system, and the second stage consists of changing the
system to correct these problems. Measurement tools
such as hardware monitors, software monitors and
accounting packages are typically used in the first
stage, and tools such as optimizers, simulators and
balancers are sometimes used in the second stage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1977:WSA,
author = "Mitchell G. Spiegel",
title = "Workshop summary: `Applications of queuing models to
{ADP} system performance prediction'",
journal = j-SIGMETRICS,
volume = "6",
number = "1",
pages = "13--33",
month = "Winter",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1044829.1044830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop was held on the Applications of Queuing
Models to ADP System Performance Prediction on 7-8
March 1977 at the National Technical Information
Service in Springfield, VA. Topics were divided into
four general areas: (1) Application of Queuing Models
to Feasibility and Sizing Studies, (2) Application of
Queuing Models to System Design and Performance
Management, (3) Queuing Model Validation and (4) New
Queuing Model Implementations. Mr Philip J. Kiviat,
Chairman, SIGMETRICS, made the welcoming remarks. As
Workshop Chairman, I provided a historical overview of
queuing model use which traced the development of the
application of queuing models to ADP system performance
prediction through the 20th century, while setting the
stage for each speaker's talk.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerman:1977:TWF,
author = "L. Hellerman",
title = "A table of work formulae with derivations and
applications",
journal = j-SIGMETRICS,
volume = "6",
number = "1",
pages = "35--54",
month = "Winter",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1044829.1044831",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Formulae for the work of certain common simple
computational steps are derived. The evaluation is in
terms of an information theoretic measure. The results
are then applied to evaluate the work of multiplication
and division, and the work of the IBM S/370 branch and
link instruction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allen:1977:NES,
author = "R. C. Allen and S. R. Clark",
title = "A note on an empirical study of paging on an {IBM
370\slash 145}",
journal = j-SIGMETRICS,
volume = "6",
number = "1",
pages = "55--62",
month = "Winter",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1044829.1044832",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:12 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A summary is presented of the paging activity observed
for various programs executing on a System/370 model
145 using OS/VSI (Release 2.0). Paging activity was
measured by periodic sampling of the queues involved in
real storage page management and by inspection of page
traffic counters maintained by the operating system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Morrison:1977:ASC,
author = "Robert L. Morrison",
title = "Abstracts from the 1977 {SIGMETRICS\slash CMG VIII}
conference",
journal = j-SIGMETRICS,
volume = "6",
number = "2",
pages = "3--21",
month = "Spring",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041750.1041751",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazos:1977:FDW,
author = "Constantine Lazos",
title = "Functional distribution of the workload of a linked
computer system and its simulation",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "5--14",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041754",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consideration is given to a possible functional
distribution of the workload over two linked computers
with separate channel access to a large disc store,
into the resource utilisation of the linked system
achieved by simulation using a modified and re-entrant
single processor simulator. Results suggest that the
proposed distribution realises a high utilisation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "compilation; disc channel traffic; hardware
utilisation; I/O buffers; in process; linked computer
system; multiprocessing; out process; simulation; trace
driven; work load",
}
@Article{Scheer:1977:COM,
author = "A.-W. Scheer",
title = "Combination of an optimization model for hardware
selection with data determination methods",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "15--26",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041755",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The selection of an EDP configuration often fixes a
firm to a single manufacturer for a long time and the
capabilities of the chosen computer will continually
influence the firm's organization. Only few approaches
exist to give assistance to the investors by developing
useful decision models based on the investment theory
/11, 12/. The hardware selection methods /4, 13/ used
up to now, like benchmark tests, don't meet these
demands. In this paper an investment model based on
mathematical programming is developed which considers
the aspects of investment for hardware selection.
Nevertheless, the present methods stay valid because
their output can be used as delta input for the
optimization model. Therefore, a concept is proposed
which combines these methods with an optimization
model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berinato:1977:AMT,
author = "Terence Berinato",
title = "An analytical model of a teleprocessing system",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "27--32",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041756",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A queuing model has been developed to study the
performance and capacity of a casualty insurance
teleprocessing system. This paper presents the salient
features of the system itself, relates those features
to basic queuing theory algorithms, outlines the basic
model construction, and discusses the validation
results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chanson:1977:SSA,
author = "Samuel T. Chanson and Craig D. Bishop",
title = "A simulation study of adaptive scheduling policies in
interactive computer systems",
journal = j-SIGMETRICS,
volume = "6",
number = "3",
pages = "33--39",
month = "Summer",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041753.1041757",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:19 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, some work has been done in the area of
dynamically adaptive scheduling in operating systems
(i.e., policies that will adjust to varying workload
conditions so as to maximize performance) [4],[5],
[10], [11]. However, most studies deal with
batch-oriented systems only. The University of British
Columbia operates an IBM 370/168 running under MTS
(Michigan Terminal System) which is principally used
interactively. It has been known for some time that the
system is Input/Output bound. The main goal of this
work is to determine to what extent adaptive control,
particularly as related to processor scheduling, can
improve performance in a system similar to U. B. C.'s.
Simulation is used throughout the study and because of
this, the simulator and the workload are described in
some detail. The target machine is a somewhat
simplified version of the U.B.C. System.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ziegler:1977:DST,
author = "Kurt Ziegler",
title = "A data sharing tutorial",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "3--7",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041760",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial is intended to acquaint the reader with
the issues of DATA SHARING and to develop an
understanding for the implications of such facilities
in the topic of integrity, performance, and recovery.
Some future concerns are also discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scott:1977:PDP,
author = "Shirley E. Scott",
title = "Pricing {D.P.} products: a timesharing
implementation",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "8--12",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041761",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Periodically, vending Data Processing organizations
are faced with the task of establishing service rates
for the resources provided to Customers. Sigmetrics'
Technical Meeting on Pricing Computer Services
(November, 1975) is a good indicator of the amount and
variety of interest the topic generates. The
proceedings from that meeting were a key source of
reference for the formulation and implementation of a
pricing strategy and automated model in one of Xerox's
timesharing data centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sarzotti:1977:TTS,
author = "Alain Sarzotti",
title = "Transactional terminal system on micro-processor: a
method for identifying \& modeling overall
performance",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "13--22",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041762",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A typical banking, financial and administrative system
involves specific characteristics: a large number of
devices around a processor, with several different
kinds of work stations (displays, keyboards, printers,
badge and document readers \ldots{}), a heterogeneous
workload (by linkage of specialized micro-transactions
using local or remote files), versatile operating
facilities on displays for untrained administrative
personnel (form-loading on the display, selecting key
words, spotting errors, generating operational messages
\ldots{}), and working with several sets of typical
functions (savings operations, cheque accounting, fund
transfer, deposits, withdrawals, and mainly data
entry). In this case it was mandatory to approach the
system performance evaluation study by first building
and observing a typical workload model in the forecast
operating environment. Measurement steps were then
scheduled from outside to inside operating procedures
to get analysis from the user's point of view (a bank
teller's operations, for example). Then, overall
performance results were derived by direct measurement,
which established relationships between throughput,
response time, processor overhead, and space and time
parameters related to system behavior. That was done by
progressively increasing the number of terminals and
exercising the workload on two levels of technical and
functional saturation. Simultaneously, a simulation
model used the same description of the workload, and
after validation with the preceding direct measurement
results, was used to extend the previous relationships
on various systems. (The full range of Erlang
distribution parameters is assumed with unknown
servers; the trace-driven method was not possible.) The
final results are shown in tables and charts which
exhibit system boundaries, providing useful guidelines
for designing network stations and performing workload
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bazewicz:1977:UMP,
author = "Mieczyslaw Bazewicz and Adam Peterseil",
title = "Use of modelling in performance evaluation of computer
systems: a case of installations in the {Technical
University of Wroclaw}",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "22--26",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041763",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There is a number of models of user behaviour applied
in modelling studies on computer system performance
predictions. The models in most cases can be called
`resources-demands models', where users are only
considered as resources consumers. Some authors build
more sophisticated models --- concerning user
psychological features. The paper discusses some of the
users' models and their applicability in modelling and
design of operating systems for computers. Some
examples being the result of the research carried in
the Technical University of Wroclaw, concerning complex
users' model and performance evaluation of operating
systems by simulation are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Orchard:1977:NMC,
author = "R. A. Orchard",
title = "A new methodology for computer system data gathering",
journal = j-SIGMETRICS,
volume = "6",
number = "4",
pages = "27--41",
month = "Fall",
year = "1977",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041759.1041764",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many computer system monitoring, data gathering, and
reduction efforts ignore unbiased sampling techniques.
The approaches generally taken are expensive and can
make no scientifically based statement about the
accuracy of the data gathered or consequent data
reduction. The approach outlined in this paper attempts
to correct these inadequacies by using the theory of
random sampling. Several new techniques are introduced
for obtaining optimal error bounds for estimates of
computer system quantities obtained from random
samples. A point of view is taken (boolean variable
random sampling) which makes it unnecessary to have any
a priori knowledge of the population parameters of the
phenomena being sampled. It is expected that the
techniques introduced will significantly reduce
monitoring overhead for computer systems while
increasing the quality of the data gathered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "boolean random sampling; computer system monitoring;
data gathering",
}
@Article{Underwood:1978:HPE,
author = "Mark A. Underwood",
title = "Human performance evaluation in the use of federal
computer systems: recommendations",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "6--14",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041767",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been increased awareness in recent years of
the high cost of non-hardware items in the Federal ADP
budget in contrast with decreasing costs for much of
the hardware. More attention is being given to software
development costs, systems design practices, automatic
program testing, and the like. Particular commercial
and military systems effectiveness and life cycle costs
now take into consideration such factors as part of the
planning process. It is suggested that not enough
attention has been given to measurement of human
performance variables as part of the systems
procurement and systems evaluation phases of Federal
ADP programs. Recommendations are made for the
incorporation of such measures along with conventional
hardware/software performance measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance; federal systems evaluations;
human performance measurements; psychology of computer
systems usage",
}
@Article{Jain:1978:GSA,
author = "Aridaman K. Jain",
title = "A guideline to statistical approaches in computer
performance evaluation studies",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "18--32",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041768",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anonymous:1978:PSQ,
author = "Anonymous",
title = "{Proceedings of the Software Quality and Assurance
Workshop}",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "32--32",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041769",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Honig:1978:DPA,
author = "Howard P. Honig",
title = "Data path analysis: analyzing large {I/O}
environments",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "34--37",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041770",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As data centers grow in complexity and size, vast
amounts of data (I/O) is transferred between
peripherals and CPU's. Data Path Analysis (DPA) is a
technique developed to report the utilization of CPU's,
channels, control units, and disks during data
transfer. Simply put, the technique analyzes data
paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sauer:1978:SRP,
author = "C. H. Sauer and E. A. MacNair",
title = "Simultaneous resource possession in queueing models of
computers",
journal = j-SIGMETRICS,
volume = "7",
number = "1--2",
pages = "41--52",
month = "Spring-Summer",
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041766.1041771",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Neglect of simultaneous resource possession is a
significant problem with queueing network models of
computers. This is illustrated by examples of memory
contention and channel contention with position sensing
I/O devices. A class of extended queueing networks is
defined to allow representation of simultaneous
resource possession. Extended queueing network models
of memory contention and channel contention are given.
Solution techniques and numerical results for these
models are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel contention; hierarchical decomposition; memory
contention; performance evaluation; queueing networks;
regenerative simulation; response time",
}
@Article{Pfau:1978:AQA,
author = "Pamela R. Pfau",
title = "Applied quality assurance methodology",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "1--8",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What is the charter of a Quality Assurance (Q.A.)
department? What are the activities? How are they
undertaken? What is the impact of Quality Assurance
upon a software product? The structure and operating
philosophy of the department are explained in this
report as is the definition of the work cycle as
applied to a new release of a software product.
Comments are made about the interaction between
departments: product development, product maintenance,
publications, education, field support, product
management, marketing, product distribution and quality
assurance. While this is a description of the
activities of a company involved in developing and
marketing software products, the concepts apply to
techniques and practices which would also be beneficial
to any data processing department that develops
in-house application software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bersoff:1978:SCM,
author = "Edward H. Bersoff and Vilas D. Henderson and Stan G.
Siegel",
title = "Software Configuration Management",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "9--17",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811093",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is about discipline. It is about discipline
that managers should apply to software development. Why
is such discipline needed? Quite simply because the
software industry has traditionally behaved in an
undisciplined manner --- doing its own thing. The
products that the industry has turned out have
typically Contained other than what was expected
(usually less, rather than more); Been delivered much
later than scheduled; Cost more than anticipated; Been
poorly documented; and If you have been involved in any
of the situations quoted above, then this paper may be
of some help. In short, if you are now, or intend to
be, a software seller or buyer, then you should benefit
from an understanding of Software Configuration
Management. Lest you think that you are not now, or
ever will be, a software seller or buyer --- keep in
mind that the recent technology explosion in electronic
component miniaturization has placed the era of
personalized computing at hand. In that context, nearly
everyone may be considered a potential seller or buyer
of software. This paper is about the discipline called
Software Configuration Management (SCM). The objective
of SCM is to assist the software seller in achieving
product integrity and to assist the software buyer in
obtaining a product that has integrity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glass:1978:CFL,
author = "Robert L. Glass",
title = "Computing failure: a learning experience",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "18--19",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computing people can learn from failure as well as
success. Most professional papers deal only with the
latter \ldots{} yet it is well known that some of our
most lasting learning experiences are based on failure.
This paper is a lighthearted, anecdotal discussion of a
computing failure, with an underlying message that
sharing the sometimes embarrassing truths about What
Goes Wrong In Our Field is at least as illuminating as
more serious discussions about Things That Look
Promising. There are some necessary defense mechanisms
to be dealt with in discussing failure. People who have
failed in general do not want the world to know about
it. Perhaps even more so, companies which have failed
also do not want the world to know about it. As a
result, the content of this paper is fictionalized to
some extent. That is, company names and people names
are creations of the author, and there are
corresponding distortions in some story details.
However, the computing meat of the paper, the basis for
the failure learning experience, is untouched.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Woodmancy:1978:SQI,
author = "Donald A. Woodmancy",
title = "A Software Quality Improvement Program",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "20--26",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811095",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In late 1976, the NCR Corporation undertook a large
scale Quality Improvement Program for a major set of
systems software. That software set included some 103
separate products totaling 1.3 million source lines. It
included several operating systems, several compilers,
peripheral software, data utilities and
telecommunications handlers. This paper will describe
that effort and its results. The research and planning
that were done to define the program will be described.
The means by which the program was implemented will be
discussed in detail. Finally, some results of the
program will be identified.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fujii:1978:CSA,
author = "Marilyn S. Fujii",
title = "A comparison of software assurance methods",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "27--32",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811096",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several methods are currently employed by software
developers to improve software quality. This paper
explores the application of three of these methods:
quality assurance, acceptance testing, and independent
verification and validation. At first glance these
methods appear to overlap, but a closer evaluation
reveals that each has a distinct objective and an
established set of procedures. The purpose of this
paper is to clarify the role of each of these methods
by examining their scope, organization, and
implementation in the software development process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sukert:1978:EMA,
author = "Alan N. Sukert and Amrit L. Goel",
title = "Error modelling applications in software quality
assurance",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "33--38",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811097",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents the results of a two-phased
experiment conducted by Rome Air Development Center and
Syracuse University to demonstrate the potential
applicability of software error prediction models in
performing formalized qualification testing of a
software package. First, decisions based upon the
predictions of three software error prediction models
will be compared with actual program decisions for a
large command and control software development project.
Classical and Bayesian demonstration tests are used to
make accept/reject decisions about the software system.
Finally, the results of the two phases will be compared
and some conclusions drawn as to the potential use of
these predictive techniques to software quality
assurance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Duran:1978:TMP,
author = "Joe W. Duran and John J. Wiorkowski",
title = "Toward models for probabilistic program correctness",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "39--44",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811098",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Program testing remains the major way in which program
designers convince themselves of the validity of their
programs. Software reliability measures based on
hardware reliability concepts have been proposed, but
adequate models of software reliability have not yet
been developed. Investigators have recently studied
formal program testing concepts, with promising
results, but have not seriously considered quantitative
measures of the ``degree of correctness'' of a program.
We present models for determining, via testing, such
probabilistic measures of program correctness as the
probability that a program will run correctly on
randomly chosen input data, confidence intervals on the
number of errors remaining in a program, and the
probability that the program has been completely
tested. We also introduce a procedure for enhancing
correctness estimates by quantifying the error reducing
performance of the methods used to develop and debug a
program.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yin:1978:EUM,
author = "B. H. Yin and J. W. Winchester",
title = "The establishment and use of measures to evaluate the
quality of software designs",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "45--52",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811099",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It has been recognized that success in producing
designs that realize reliable software, even using
Structured Design, is intimately dependent on the
experience level of the designer. The gap in this
methodology is the absence of easily applied
quantitative measures of quality that ease the
dependence of reliable systems on the rare availability
of expert designers. Several metrics have been devised
which, when applied to design structure charts, can
pinpoint sections of a design that may cause problems
during coding, debugging, integration, and
modification. These metrics can help provide an
independent, unbiased evaluation of design quality.
These metrics have been validated against program error
data of two recently completed software projects at
Hughes. The results indicate that the metrics can
provide a predictive measure of program errors
experienced during program development. Guidelines for
interpreting the design metric values are summarized
and a brief description of an interactive structure
chart graphics system to simplify metric value
calculation is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pierce:1978:RTT,
author = "Robert A. Pierce",
title = "A Requirements Tracing Tool",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "53--60",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811100",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A software development aid termed the Requirements
Tracing Tool is described. Though originally designed
to facilitate requirements analysis and thus simplify
system verification and validation, it has also proven
useful as an aid for coping with changing software
requirements and estimating their consequent cost and
schedule impacts. This tool provides system analysts
with a mechanism for automated construction,
maintenance, and access to a requirements data base ---
an integrated file containing all types and levels of
system requirements. This tool was used during the
development of a large Navy undersea acoustic sensor
system. It is presently being used to support the
Cruise Missile Mission Planning Project. An outline
version of this tool is under development.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Davis:1978:RLP,
author = "Alan M. Davis and Walter J. Rataj",
title = "Requirements language processing for the effective
testing of real-time systems",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "61--66",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811101",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "GTE Laboratories is currently developing a trio of
software tools which automate the feature testing of
real-time systems by generating test plans directly
from requirements specifications. Use of the first of
these tools, the Requirements Language Processor (RLP),
guarantees that the requirements are complete,
consistent, non-ambiguous, and non-redundant. It
generates a model of an extended finite-state machine
which is used by the second tool, the Test Plan
Generator, to generate test plans which thoroughly test
the software for conformity to the requirements. These
test plans are supplied to the third tool, the
Automatic Test Executor, for actual testing. The RLP is
the subject of this paper. The primary goal of the RLP
is to provide the ability to specify the features of a
target real-time system in a vocabulary familiar to an
application-oriented individual and in a manner
suitable for test plan generation. The RLP produces a
document which can be easily understood by non-computer
personnel. It is expected that this document will
function as a key part of the ``contract'' between a
real-time system supplier and a customer. This document
must also serve as a springboard for the software
designers during their development of the actual
product. In addition to the requirements document, the
RLP also produces an augmented state transition table
which describes a finite state machine whose external
behavior is identical to the target real-time system as
defined by the specified requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peters:1978:RSR,
author = "Lawrence Peters",
title = "Relating software requirements and design",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "67--71",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/953579.811102",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Software development is a process which has evolved
into a number of phases. Although the names of the
phases and some of their characteristics differ from
contractor to contractor and customer to customer, the
functional similarities among sets of phases cannot be
ignored. The basic software development scenario
depicted by these phases starts with problem
identification and definition, requirements
specification, design, code, test, and installation and
maintenance. Although some ``smearing'' of one phase
activity into other(s) may occur, this represents the
basic flow. However, it is just that smearing which
occurs between requirements and design that we wish to
explore here. Identifying or defining problems and
solving problems are viewed by many to be separate,
distinguishable activities. They are complementary in
that one identifies what must be done (requirements)
while the other depicts how it will be done (design).
But software designers complain bitterly that
requirements are poorly defined while customers and
analysts often complain that the design is not
responsive to the problem(s) as they perceive it.
Somehow software designers end up discovering
previously unknown requirements and end up solving a
problem which is foreign to the customer. Is there a
workable mechanism to reduce this difficulty?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stavely:1978:DFU,
author = "Allan M. Stavely",
title = "Design feedback and its use in software design aid
systems",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "72--78",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811103",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is argued that software system designers would
benefit greatly from feedback about the consequences of
a proposed design if this feedback could be obtained
early in the development process. A taxonomy of
possible types of feedback and other design aids is
presented, and the capabilities of several existing
design aid systems are described relative to this
taxonomy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yoder:1978:NSC,
author = "Cornelia M. Yoder and Marilyn L. Schrag",
title = "{Nassi--Shneiderman} charts an alternative to
flowcharts for design",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "79--86",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007775.811104",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years structured programming has emerged as
an advanced programming technology. During this time,
many tools have been developed for facilitating the
programmer's use of structured programming. One of
these tools, the Structured Flowcharts developed by I.
Nassi and B. Shneiderman in 1972, is proving its value
in both the design phase and the coding phase of
program development. Several programming groups in
System Products Division, Endicott, New York, have used
the Nassi--Shneiderman charts as replacements for
conventional flowcharts in structuring programs. The
charts have been used extensively on some projects for
structured walk-throughs, design reviews, and
education. This paper describes the Nassi--Shneiderman
charts and provides explanations of their use in
programming, in development process control, in
walk-throughs, and in testing. It includes an analysis
of the value of Nassi--Shneiderman charts compared to
other design and documentation methods such as
pseudo-code, HIPO charts, prose, and flowcharts, as
well as the authors' experiences in using the
Nassi--Shneiderman charts. The paper is intended for a
general data processing audience and although no
special knowledge is required, familiarity with
structured programming concepts would be helpful. The
reader should gain insight into the use of
Nassi--Shneiderman charts as part of the total
development process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Benson:1978:SQA,
author = "J. P. Benson and S. H. Saib",
title = "A software quality assurance experiment",
journal = j-SIGMETRICS,
volume = "7",
number = "3--4",
pages = "87--91",
month = nov,
year = "1978",
CODEN = "????",
DOI = "https://doi.org/10.1145/800283.811105",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:52:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An experiment was performed to evaluate the ability of
executable assertions to detect programming errors in a
real time program. Errors selected from the categories
of computational errors, data handling errors, and
logical errors were inserted in the program. Assertions
were then written which detected these errors. While
computational errors were easily detected, data
handling and logical errors were more difficult to
locate. New types of assertions will be required to
protect against these errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Assertions; Error categories",
}
@article{Bauer:1978:AGE,
  author = {Jonathan Bauer and Susan Faasse and Alan Finger and
    William Goodhue},
  title = {The automatic generation and execution of function
    test plans for electronic switching systems},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {92--100},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/953579.811106},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {A three phase functional testing methodology is
    described for use in the development cycle of
    electronic switching systems. The methodology centers
    on a directed graph model of the system and provides
    for the checking of system requirements, the generation
    of functional tests and the automatic execution of
    these tests.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@Article{Martin:1978:SAT,
  author = "K. A. Martin",
  title = "Software acceptance testing that goes beyond the
    book",
  journal = j-SIGMETRICS,
  volume = "7",
  number = "3--4",
  pages = "101--105",
  month = nov,
  year = "1978",
  CODEN = "????",
  DOI = "https://doi.org/10.1145/800283.811107",
  ISSN = "0163-5999 (print), 1557-9484 (electronic)",
  bibdate = "Thu Jun 26 10:52:43 MDT 2008",
  bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract = "The design of software acceptance tests is as
    important to meeting contract goals as is the design of
    algorithms. This statement is particularly significant
    on fixed price contracts with tight schedules. An
    extreme instance of the demand placed on acceptance
    testing can be found in software projects wherein the
    only rigorous testing that required the Computer
    Program Configuration Item (CPCI) to exercise its
    repertoire of load and store instructions was the
    Formal Qualification Test (FQT). This paper is about
    such a project, the lessons learned from it, and
    provides an effective test approach for fixed price
    contracts. A word or two about the project is
    appropriate to establish the context that underscores
    the impact of the above assertion. Initially 30K (core
    words), 16-bit program instructions were to be
    developed within one year using a Varian 73 computer
    with 32K words of memory for a Command and Control
    application under a fixed price contract. A set of a
    priori conditions existed that tended to convey the
    impression that the inherent risks of this endeavor
    were reasonable. They were the ``facts'' that: Of the
    30K (core words) to be written, 30\% of this code
    already existed and would be used. Contractor standards
    would be allowed for documentation with limited use of
    Military Specifications. No formal Design Reviews or
    audits would accompany the deliverable CPCI. Existent
    executive software would suffice. A competent and
    enthusiastic team was committed to the effort.",
  acknowledgement = ack-nhfb,
  ajournal = "Perform. Eval. Rev.",
  fjournal = "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Drasch:1978:ITP,
  author = "Frederick J. Drasch and Richard A. Bowen",
  title = "{IDBUG}: a tool for program development",
  journal = j-SIGMETRICS,
  volume = "7",
  number = "3--4",
  pages = "106--110",
  month = nov,
  year = "1978",
  CODEN = "????",
  DOI = "https://doi.org/10.1145/953579.811108",
  ISSN = "0163-5999 (print), 1557-9484 (electronic)",
  bibdate = "Thu Jun 26 10:52:43 MDT 2008",
  bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract = "The construction of a reliable computer program
    requires, in part, a means of verification of its
    component parts prior to their integration into the
    overall system. The verification process may consist of
    building a test harness to exercise or exhaustively
    test a procedure. This technique is known as dynamic
    testing. In practice, the application of dynamic
    testing requires the coding of a special harness for
    each procedure. This consumes valuable programming
    time, as much as 50\% of the total effort (FAIR78). It
    is also restrictive because the test harness cannot be
    easily modified to test aspects of a program which it
    was not originally designed to test. We have built a
    facility called IDBUG that reduces the programming
    effort required to employ dynamic testing by automating
    the construction of the test harness. Additionally, it
    provides an interactive test environment which permits
    more flexible testing. This paper describes IDBUG and
    discusses our experience in its application to
    maintenance tasks in a commercial environment. None of
    the ideas put forth here will be especially novel;
    dynamic testing as a software testing tool has been in
    use for some time. What we hope to do is illustrate the
    beneficial aspects of a particular application of
    dynamic testing. It is argued that testing should play
    a more limited role in assuring the reliability of
    software in light of techniques such as structured
    coding, top-down design, proof of correctness, etc.
    (McG075). While it is true that eventually the ``art of
    computer programming'' will become the ``science of
    producing correct programs'', we believe that more
    emphasis must be placed on interim solutions to aid in
    the construction of reliable software. We present IDBUG
    as such a solution.",
  acknowledgement = ack-nhfb,
  ajournal = "Perform. Eval. Rev.",
  fjournal = "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stickney:1978:AGT,
  author = "M. E. Stickney",
  title = "An application of graph theory to software test data
    selection",
  journal = j-SIGMETRICS,
  volume = "7",
  number = "3--4",
  pages = "111--115",
  month = nov,
  year = "1978",
  CODEN = "????",
  DOI = "https://doi.org/10.1145/953579.811109",
  ISSN = "0163-5999 (print), 1557-9484 (electronic)",
  bibdate = "Thu Jun 26 10:52:43 MDT 2008",
  bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract = "Graph theory is playing an increasingly important role
    in the design, analysis, and testing of computer
    programs. Its importance is derived from the fact that
    flow of control and flow of data for any program can be
    expressed in terms of directed graphs. From the graph
    representing the flow of control, called the program
    graph, many others can be derived that either partially
    or completely preserve the program control structure.
    One derived graph known as a cyclomatic tree is of
    particular value in program testing. It is so named
    because the number of leaves of the tree is equal to
    the cyclomatic number of the program graph. A thorough
    treatment of cyclomatic numbers is provided in [3]. A
    program called the Complexity/Path Analyzer (CPA) has
    been developed that builds and utilizes a program
    cyclomatic tree to provide test planning information,
    automatically place software counters called probes as
    discussed in [9] and [10] in a program, and provide
    selected parameters such as program length and program
    graph cyclomatic number. The paper discusses the
    features and derivation of cyclomatic trees as well as
    their value and application to testing and test data
    generation. A cyclomatic tree provides a test planner
    with information useful for planning program tests. In
    particular, it furnishes test data selection criteria
    for developing tests that are minimally thorough as
    defined by Huang in [9]. A test data selection
    criterion will be defined as minimally thorough if any
    complete test with respect to the criterion is at least
    minimally thorough. The term complete is used as
    defined by Goodenough and Gerhart in [13]. A test is
    defined to be a non empty sequence of test cases. Each
    test case consists of an element selected from the
    input domain of the program being tested. The paper
    discusses the merits of one particular technique
    selected to achieve a minimally thorough test data
    selection criteria. Part of the technique is automated
    by the CPA program.",
  acknowledgement = ack-nhfb,
  ajournal = "Perform. Eval. Rev.",
  fjournal = "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@article{Fischer:1978:SQA,
  author = {Kurt F. Fischer},
  title = {Software quality assurance tools: {Recent} experience
    and future requirements},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {116--121},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/1007775.811110},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {The objective of software quality assurance (QA) is to
    assure sufficient planning, reporting, and control to
    affect the development of software products which meet
    their contractual requirements. To implement this
    objective, eight QA functions can be identified: 1.
    Initial quality planning 2. Development of software
    standards and procedures 3. Development of quality
    assurance tools 4. Conduct of audits and reviews 5.
    Inspection and surveillance of formal tests 6.
    Configuration verifications 7. Management of the
    discrepancy reporting system 8. Retention of QA records
    The purpose of this paper is to document experiences
    gained in the use of selected QA tools that perform
    some of the above functions, to discuss lessons
    learned, and to suggest future needs.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Glasser:1978:ESC,
  author = {Alan L. Glasser},
  title = {The evolution of a {Source Code Control System}},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {122--125},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/953579.811111},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {The Source Code Control System (SCCS) is a system for
    controlling changes to files of text (typically, the
    source code and documentation of software systems). It
    is an integral part of a software development and
    maintenance system known as the Programmer's Workbench
    (PWB). SCCS has itself undergone considerable change.
    There have been nine major versions of SCCS. This paper
    describes the facilities provided by SCCS, and the
    design changes that were made to SCCS in order to
    provide a useful and flexible environment in which to
    conduct the programming process.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Josephs:1978:MCB,
  author = {William H. Josephs},
  title = {A mini-computer based library control system},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {126--132},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/953579.811112},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {One of the major problems encountered in any large
    scale programming project is the control of the
    software. Invariably, such large programs are divided
    into many smaller elements since these are easier to
    code, test and document. However, such a division adds
    new complexity to the task of Configuration Management
    since the many source modules, data base elements, JCL
    (Job Control Language) and DATA files must be
    controlled with the goal of maximizing program
    integrity and minimizing the chances of procedural
    errors. Furthermore, whenever any program is released
    either for field test or for final production, an
    entire change control procedure must be implemented in
    order to trace, install, debug and verify fixes or
    extensions to the original program. These maintenance
    activities can account for up to 80 percent of the
    entire programming cost in a large, multi-year project.
    The library control program (SYSM) presented here was
    developed to aid in these processes. It has facilities
    for capturing all elements of a program (commonly
    called baselining), editing any element or group of
    elements that have been baselined to build an updated
    version of the program, adding and/or deleting elements
    of a program, and listing the current contents of a
    given element or elements. SYSM is written mainly in
    FORTRAN, and runs on a Hewlett--Packard HP-21MX
    computer with two tape drives, the vendor supplied
    RTE-II or RTE-III operating system, and at least 16K of
    user available core. It can be used to control code
    targeted for either the HP21MX itself, or, using the
    optional HP/LSI-11 link program, code targeted for a
    Digital Equipment Corp. LSI-11 system.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Cavano:1978:FMS,
  author = {Joseph P. Cavano and James A. McCall},
  title = {A framework for the measurement of software quality},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {133--139},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/1007775.811113},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {Research in software metrics incorporated in a
    framework established for software quality measurement
    can potentially provide significant benefits to
    software quality assurance programs. The research
    described has been conducted by General Electric
    Company for the Air Force Systems Command Rome Air
    Development Center. The problems encountered defining
    software quality and the approach taken to establish a
    framework for the measurement of software quality are
    described in this paper.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Cobb:1978:MSU,
  author = {Gary W. Cobb},
  title = {A measurement of structure for unstructured
    programming languages},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {140--147},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/953579.811114},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {Software Science is a field of Natural Science which
    deals with the development of measurements which reveal
    properties of software programs. These measurements are
    qualified as to their degree of correlation to human
    beings being able to construct or understand a subject
    program. Maurice Halstead has pioneered much of the
    theories in this field ((5) through (10)), which
    applies statistical and psychological testing
    techniques to the evaluation of the measurements. The
    basic inputs to the Halstead predictors are easily
    measured: the number of distinct operators and
    operands, and the number of occurrences of the
    operators and operands. Due to the statistical nature
    of the measurements, there can be erroneous results
    when applying them to small sample spaces. However, the
    predictors are very adequate when applied to large
    samples, that is, large software systems. In an
    excellent review article by Fitzsimmons and Love (4),
    it is pointed out that several of the estimators
    defined by Halstead assumed that the subject programs
    were well-structured, and inaccuracy in the predictors
    can result if they are applied to `unpolished'
    programs. In fact, Halstead qualified six classes of
    impurities in code which can cause the length predictor
    to be inaccurate. The definition of volume for
    software, another predictor introduced in Halstead's
    book, is related to the level of the specification of
    the program. An algorithm which is written in assembly
    language will have a greater volume than the same
    algorithm written in Pascal, due to the richness of the
    semantic constructs that are available in the
    higher-level languages. Hence, this predictor is
    language dependent.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@Article{Bowen:1978:CAS,
  author = "John B. Bowen",
  title = "Are current approaches sufficient for measuring
    software quality?",
  journal = j-SIGMETRICS,
  volume = "7",
  number = "3--4",
  pages = "148--155",
  month = nov,
  year = "1978",
  CODEN = "????",
  DOI = "https://doi.org/10.1145/953579.811115",
  ISSN = "0163-5999 (print), 1557-9484 (electronic)",
  bibdate = "Thu Jun 26 10:52:43 MDT 2008",
  bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract = "Numerous software quality studies have been performed
    over the past three years --- mostly sponsored by the Rome
    Air Development Center. It is proposed by the author
    that more emphasis should be placed on devising and
    validating quantitative metrics that are indicative of
    the quality of software when it is being designed and
    coded. Such measures could be applied effectively, as
    relative guidelines without formal validation. However
    for such measures to be predictive of the quality of
    the delivered software, they must be validated with
    actual operational error data or data gathered in a
    simulated operational environment. This paper includes
    a review of proposed metrics from the literature, a
    report of a Hughes intramodule metric study, and
    recommendations for refining proposed software quality
    assurance criteria.",
  acknowledgement = ack-nhfb,
  ajournal = "Perform. Eval. Rev.",
  fjournal = "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@article{Lockett:1978:UPM,
  author = {Joann Lockett},
  title = {Using performance metrics in system design},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {156--159},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/1007775.811116},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {Complexities of system design are great and often lead
    designers to be inward looking in their analyses.
    Knowledge from various fields can be of benefit in
    designing systems [1]. Management accountants can
    describe economic effects of delays in closing
    schedules, psychologist can provide significant
    insights into the behavioral characteristics of users
    to complex command syntax, computer performance
    analysts can provide alternatives to describe and to
    measure responsiveness of systems. Even in the case of
    an innovative system design, the designer can employ
    such approaches to identify incipient problems and
    create alternatives with increased cost effectiveness.
    This paper describes how performance metrics can be
    used effectively to support system design.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Southworth:1978:RM,
  author = {Richard N. Southworth},
  title = {Responding to {MIL-S-52779}},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {160--164},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/953579.811117},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {The art and science of computer software development
    is still changing considerably from year to year, and
    therefore lacks the established control mechanisms of
    hardware production programs. Also, because most
    software is produced in a one-time development program
    it does not lend itself to the established discrepancy
    detection and correction techniques used in hardware
    production programs. Consequently, the software QA
    program must provide the methodology to detect a
    deficiency the first time it occurs and effect
    corrective action. MIL-S-52779: ``Software Quality
    Assurance Program Requirements,'' has provided a much
    needed impetus for software development contractors to
    develop software QA techniques. But much remains to be
    done. As the state of the art advances MIL-S-52779
    should be revised accordingly. In this paper the author
    responds to the present form of the specification,
    suggests some revisions and additions and briefly
    discusses a set of QA procedures that should be
    responsive (fully compliant) with MIL-S-52779.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Tighe:1978:VPS,
  author = {Michael F. Tighe},
  title = {The value of a proper software quality assurance
    methodology},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {165--172},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/800283.811118},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {This paper describes the experiences of a project
    development team during an attempt to ensure the
    quality of a new software product. This product was
    created by a team of software engineers at Digital
    Equipment Corporation, a mainframe manufacturer. As a
    result, the definition of ``to ensure the quality of a
    software product'' meant minimizing the maintenance
    costs of the new product. Ease of maintenance and a low
    bug rate after release to the customer were very
    important goals from the beginning of the project. This
    paper compares the degree of application and resultant
    effects of several software quality assurance
    methodologies upon different parts of the final
    product. Many of the product's subsystems were created
    using all of the discussed methodologies rigorously.
    Some subsystems were created with little or no use of
    the methodologies. Other subsystems used a mixture. The
    observed quality of the various subsystems when related
    to the methodology used to create them provides
    insights into the interactions between the
    methodologies. These observations also supply
    additional experience to reinforce established beliefs
    concerning the value of quality assurance
    methodologies.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Belford:1978:QEE,
  author = {Peter Chase Belford and Carlo Broglio},
  title = {A quantitative evaluation of the effectiveness of
    quality assurance as experienced on a large-scale
    software development effort},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {173--180},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/953579.811119},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {The purpose of quality assurance on software projects
    is to achieve high quality products on schedule, within
    cost, and in compliance with contract requirements.
    However, historically, the effectiveness of these
    activities on software projects has not been
    quantitatively demonstrable because of a lack of data
    collected on the project combined with a lack of
    insight into the operational reliability of the system.
    Quality assurance is a collection of activities on a
    contractual deliverable whose purpose is to impart a
    degree of confidence that the deliverable will conform
    to the customer's concept of what was procured. Under
    these conditions, quality assurance must be performed
    with respect to a documented baseline of the concept.
    This baseline can address the need in the form of
    requirement statements; the conceptual approach to be
    followed in the form of a functional specification; or
    the design to be implemented in the form of a design
    specification. Further, these baselines are
    hierarchical in the sense that when quality assurance
    is applied to a level it is implicitly applied to all
    lower levels; e.g., if the need is to be satisfied, the
    conceptual approach must be satisfied. Effective
    quality assurance programs impart a high degree of
    confidence to the customer without significant impacts
    on schedule or cost. Historically, this effectiveness
    has not been quantitatively demonstrable because of a
    lack of data collected on the project combined with a
    lack of insight into the operational reliability of the
    system.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Kacik:1978:ESQ,
  author = {Paul J. Kacik},
  title = {An example of software quality assurance techniques
    used in a successful large scale software development},
  journal = j-SIGMETRICS,
  volume = {7},
  number = {3--4},
  pages = {181--186},
  month = nov,
  year = {1978},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/953579.811120},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:52:43 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {Development of the software package for the Combat
    Grande Air Defense System was considered by the Hughes
    Aircraft Company to be highly successful in that a
    reliable system was produced that met customer
    requirements and was completed within time and budget
    allocations --- a feat not often attained in large
    scale software developments. Much of the success can be
    attributed to the software quality assurance (QA)
    techniques used. Some of these QA techniques are listed
    in Table 1 along with the phases in which they were
    used. This paper describes these QA techniques in some
    detail, as well as those aspects of the system and
    software development program that permitted these
    techniques to be used effectively. Background
    information is presented first which describes the
    system, software, organization and software
    configuration management. This is followed by a
    description of the three major phases of software
    development. The overall results are then presented,
    followed by recommended improvements and conclusions.
    Many of the QA techniques listed in Table 1 were used
    in several phases of software development. However, a
    particular technique is discussed only in the phase in
    which it was most extensively used.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Kreutzer:1979:CSM,
  author = {Wolfgang Kreutzer},
  title = {Computer system modelling and simulation},
  journal = j-SIGMETRICS,
  volume = {8},
  number = {1--2},
  pages = {9--35},
  month = {Spring-Summer},
  year = {1979},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/1041853.1041854},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:53:30 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {To evaluate the suitability and limitations of
    software for computer systems modelling, a basic
    comprehension of the structure of such tools must be
    provided. A brief discussion of conceptual requirements
    for the description of discrete models, and computer
    system models in particular, is followed by a survey of
    commercially available computer simulation packages.
    Special and general purpose discrete event simulation
    and general purpose programming languages are also
    analysed for their suitability for this class of
    applications. The survey closes with some
    recommendations and guidelines for selection and
    application of computer system simulation tools. To aid
    the analyst contemplating a computer system modelling
    project, a brief list of relevant addresses and
    annotated references is also included.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@Article{Turner:1979:ISM,
  author = "Rollins Turner",
  title = "An investigation of several mathematical models of
    queueing systems",
  journal = j-SIGMETRICS,
  volume = "8",
  number = "1--2",
  pages = "36--44",
  month = "Spring-Summer",
  year = "1979",
  CODEN = "????",
  DOI = "https://doi.org/10.1145/1041853.1041855",
  ISSN = "0163-5999 (print), 1557-9484 (electronic)",
  bibdate = "Thu Jun 26 10:53:30 MDT 2008",
  bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract = "A number of simple mathematical models were used to
    predict average response time of a timesharing system.
    The target system was a very simple trace driven
    simulation model, but the workloads were trace files
    obtained from a real system in normal operation. As
    such, the workloads were characterized by very high
    coefficients of variation in resource demands and think
    times. Mathematical models of the system included
    independent arrival models (M/M/1 and M/G/1, closed
    network models) admitting product form solutions, and a
    more general Markov model. Only the final model
    produced reasonable accuracy. A number of experiments
    were performed, in an effort to determine what
    properties of the system being modeled were responsible
    for the failure of all the simple mathematical models.
    The large variance in CPU time and the fact that the
    system was a closed network were found to be critical
    factors, and appeared to be the major causes for
    failure of models that do not take them into account.",
  acknowledgement = ack-nhfb,
  ajournal = "Perform. Eval. Rev.",
  fjournal = "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@article{Sauer:1979:CIQ,
  author = {Charles H. Sauer},
  title = {Confidence intervals for queueing simulations of
    computer systems},
  journal = j-SIGMETRICS,
  volume = {8},
  number = {1--2},
  pages = {45--55},
  month = {Spring-Summer},
  year = {1979},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/1041853.1041856},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:53:30 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {Simulation models of computer systems may be
    formulated as queueing networks. Several methods for
    confidence interval estimation for queueing simulations
    are discussed. Empirical studies of these methods are
    presented.},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@article{Kleijnen:1979:NCS,
  author = {Jack P. C. Kleijnen},
  title = {A note on computer system data gathering},
  journal = j-SIGMETRICS,
  volume = {8},
  number = {1--2},
  pages = {56--56},
  month = {Spring-Summer},
  year = {1979},
  CODEN = {????},
  DOI = {https://doi.org/10.1145/1041853.1041857},
  ISSN = {0163-5999 (print), 1557-9484 (electronic)},
  bibdate = {Thu Jun 26 10:53:30 MDT 2008},
  bibsource = {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract = {Recently Orchard (1977) proposed a statistical
    technique for data collection in computer systems. A
    main idea was the use of random sampling, as opposed to
    traditional fixed periodic sampling. He further
    proceeded to derive confidence intervals for the
    resulting estimator. He also proposed the use of binary
    (Boolean) variables, e.g., $ q_{it} = 1 $ (or $0$) if
    at sampling time $t$ the $i$ th `slot' of a queue is
    occupied (or empty respectively).},
  acknowledgement = ack-nhfb,
  ajournal = {Perform. Eval. Rev.},
  fjournal = {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL = {https://dl.acm.org/loi/sigmetrics},
}
@Article{Rajaraman:1979:PPV,
author = "M. K. Rajaraman",
title = "Performance prediction of a virtual machine",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "57--62",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041858",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modeling and simulation of computer systems have two
main objectives. First, to evaluate the performance of
a given configuration of a machine and second, to
derive a mechanism for prediction of performance when
configuration parameters change. This paper addresses
the second issue and reports the result of a recent
investigation of a Virtual Memory Computer. The results
indicate which variables or combination of variables
have significant effect on the performance and which do
not.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jain:1979:GSA,
author = "Aridaman K. Jain",
title = "A guideline to statistical approaches in computer
performance evaluation studies",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "63--77",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041859",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schwartz:1979:DCC,
author = "E. Schwartz",
title = "Development of credible computer system simulation
models",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "78--95",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041860",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Problems encountered during a simulation effort
are influenced by the objectives of the simulation.
Verification and validation of the simulation model are
two such problems which affect the credibility (and
usability) of the model. A simulation methodology for
Program Design Analysis is described. The goal of this
simulation application is to test a design before it is
implemented. Techniques are described which enhance the
credibility of simulation models. The relationship
between Program Design Analysis and the reliability of
the system being developed is explored.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1979:CPE,
author = "Jon D. Clark and Thomas J. Reynolds and Michael J.
Intille",
title = "Computer performance evaluation: an empirical
approach",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "97--101",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041861",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance evaluation can be delineated into
the areas of selection, projection and monitoring. The
tuning of existing systems for efficient performance
may be viewed as a special case of the projection
activity involving modeling, statistics collection and
analysis. Most tools available today are expensive to
use and overly complicated. This paper presents the
comparison of two, relatively simple and
cost-effective, statistical techniques for performance
evaluation: regression and canonical analysis. In
addition, the results of the suggested and implemented
computer configuration modification is reported.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "canonical analysis; computer performance evaluation;
multi-processor; regression analysis",
}
@Article{Willis:1979:TSW,
author = "Ron Willis",
title = "Techniques in simulation which enhance software
reliability",
journal = j-SIGMETRICS,
volume = "8",
number = "1--2",
pages = "102--115",
month = "Spring-Summer",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041853.1041862",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A simplified simulation study of an actual software
development effort is presented. A model is developed
and exercised through various stages of modifications
to an originally unreliable software design until
viable software design results. Techniques in model
development, simulation, analysis, and language
capability which lead to enhanced software reliability
are discussed. Uniquenesses in the approach presented
are contrasted to simulation methods which lack this
capability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blake:1979:TSM,
author = "Russ Blake",
title = "{Tailor}: a simple model that works",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "1--11",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805444",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tailor is an atomic model of the Tandem/16
multiple-computer system. Atomic modeling is based on
operational analysis and general considerations from
queueing theory. Measurements of system atoms define
the underlying components of processor usage. The
workload is described to the model through a separate
set of measurable parameters that comprise the workload
atoms. Simple formulae from operational analysis are
then applied to predict the amount of equipment
necessary to support the projected application.
Tailor's accuracy was tested under two very different
workloads. For both a large backend database
application and a program development system, Tailor
was able to predict the equipment needed to handle the
workloads to within 5 percent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Strecker:1979:ACP,
author = "William D. Strecker",
title = "An analysis of central processor-input-output
processor contention",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "27--40",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805445",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most computer systems have separate central (CPU) and
input-output (IOP) processors to permit simultaneous
computation and input-output (I/O). It is conventional
in such systems to avoid any loss of I/O data by
granting the IOP priority over the CPU for memory
service. Although this priority discipline is simple to
implement it may result in a maximum degradation of CPU
performance. In this discussion an analysis of the IOP
priority discipline is given together with an analysis
of other priority disciplines which require the
buffering of IOP requests and results are given showing
that only a small amount of buffering is required to
produce a noticeable improvement in CPU performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Contention; CPU; I/O interference; Input-output;
Memory system; Priority discipline; Processor",
}
@Article{Wiecek:1979:PST,
author = "Cheryl A. Wiecek and Simon C. {Steely, Jr.}",
title = "Performance simulation as a tool in central processing
unit design",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "41--47",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805446",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance analysis has always been considered
important in computer design work. The area of central
processing unit (CPU) design is no exception, where the
successful development of performance evaluation tools
provides valuable information in the analysis of design
tradeoffs. Increasing integration of hardware is
producing more complicated processor modules which add
to the number of alternatives and decisions to be made
in the design process. It is important that these
modules work together as a balanced unit with no hidden
bottlenecks. This paper describes a project to develop
performance simulation as an analysis tool in CPU
design. The methodology is first detailed as a three
part process in which a performance simulation program
is realized that executes an instruction trace using
command file directions. Discussion follows on the
software implemented, applications of this tool in CPU
design, and future goals.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bennett:1979:SDS,
author = "David A. Bennett and Christopher A. Landauer",
title = "Simulation of a distributed system for performance
modelling",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "49--56",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805447",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A distributed system of cooperating minicomputers is
simulated by AIMER (Automatic Integration of Multiple
Element Radars) to model and analyze the behavior of a
radar tracking system. Simulation is applied in the
AIMER project in an attempt to model a network of
minicomputers to discover a maximally flexible network
architecture. Because building the tracking system out
of real hardware would not result in a flexible enough
testbed system, the proposed configuration is
represented by a software emulation. The instruction
sets of the individual processors are emulated in order
to allow separation of the measurement facilities from
the execution of the system. The emulation is supported
by a Nano-data QM-1 micro and nano-programmable host.
Extensive performance monitoring hooks have been built
into the emulation system which allow small performance
perturbations to become visible. The tracking network
is controlled by a combination firmware operating
system and a special emulated virtual control machine.
The tracking algorithms run on virtual machines whose
instruction sets and computational throughput can be
parameterized when the model is generated, or
dynamically by an operator during a run. The radar and
ground truth environments for the tracking system are
simulated with logic resident in one of the emulated
machines, allowing these functions to be monitored as
accurately as the tracking algorithms. The use of this
simulation technique has resulted in an extremely
flexible testbed for the development of distributed
radar tracking system models. The testbed itself can be
quickly tailored to other application problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazowska:1979:BTA,
author = "Edward D. Lazowska",
title = "The benchmarking, tuning and analytic modeling of
{VAX\slash VMS}",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "57--64",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805448",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a recent experience in
benchmarking, tuning and modelling Digital Equipment
Corporation's VMS executive running on their VAX-11/780
computer. Although we emphasize modelling here, the
three aspects are closely interrelated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marshall:1979:AMW,
author = "William T. Marshall and C. Thomas Nute",
title = "Analytic modelling of ``working set like'' replacement
algorithms",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "65--72",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805449",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although a large amount of theoretical work has been
performed in the analysis of the pure working set
replacement algorithm, little has been done applying
these results to the approximations that have been
implemented. This paper presents a general technique
for the analysis of these implementations by analytic
methods. Extensive simulations are reported which
validate the analytic model and show significant
simplifications that can be made with little loss of
accuracy. The problem of choosing memory policy
parameter values is examined and related in a simple
way to the choice of a working set window size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Briggs:1979:EBM,
author = "Fay{\'e} A. Briggs",
title = "Effects of buffered memory requests in multiprocessor
systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "73--81",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805450",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A simulation model is developed and used to study the
effect of buffering of memory requests on the
performance of multiprocessor systems. A multiprocessor
system is generalized as a parallel-pipelined processor
of order $ (s, p) $, which consists of $p$ parallel
processors each of which is a pipelined processor with
$s$ degrees of multiprogramming; there can be up to $
s*p$ memory requests in each instruction cycle. The
memory, which consists of $ N ( = 2^n)$ identical
memory modules, is organized such that there are $ \ell
( = 2^i)$ lines and $ m ( = 2^{n - i})$ identical
memory modules, where each module is characterized by
the address cycle (address hold time) and memory cycle
of $a$ and $c$ time units respectively. Too large an $
\ell $ is undesirable in a multiprocessor system
because of the cost of the processor-memory
interconnection network. Hence, we will show how
effective buffering can be used to reduce the system
cost while effectively maintaining a high level of
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raffi:1979:ECB,
author = "Abbas Raffi",
title = "Effects of channel blocking on the performance of
shared disk pack in a multi-computer system",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "83--87",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805451",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a multi-computer environment where several
computers share packs of disk drives, the architecture
of the disk controller can have significant effect on
the throughput of the disk pack. In a simple
configuration a controller can allow access to only one
disk in the pack at a time, and effectively block other
channels from accessing other disks in the pack. A
desirable alternative is to be able to access different
disks of the same pack simultaneously from different
channels. Motivated by the presence of a mixed hardware
in an installation to support both configurations, an
attempt is made to model each system and produce
analytical and simulation results to compare their
relative performances. It is predicted that under the
prevalent conditions in the installation, a complete
switchover to either system should not give rise to
significant performance change.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zahorjan:1979:ESM,
author = "John Zahorjan",
title = "An exact solution method for the general class of
closed separable queueing networks",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "107--112",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805452",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a convolution algorithm for
the full class of closed, separable queueing networks.
In particular, the algorithm represents an alternative
method to those already known for the solution of
networks with class changes, and is the first efficient
algorithm to deal with Lam-type networks [11]. As an
application of the algorithm, we study a simple
queueing network with disk I/O devices connected to a
single CPU through a single channel. The algorithm is
then used to develop a simple, accurate approximation
for the blocking of disk devices that takes place when
a customer using a disk is waiting for or in service at
the channel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kienzle:1979:SAQ,
author = "Martin G. Kienzle and K. C. Sevcik",
title = "Survey of analytic queueing network models of computer
systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "113--129",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805453",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of case studies involving the use of queueing
network models to investigate actual computer systems
are surveyed. After suggesting a framework by which
case studies can be classified, we contrast various
parameter estimation methods for specifying model
parameters based on measurement data. A tabular summary
indicates the relationships among nineteen case
studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Landry:1979:SEP,
author = "Steve P. Landry and Bruce D. Shriver",
title = "A simulation environment for performing dataflow
research",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "131--139",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805454",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dataflow languages and processors are currently being
extensively studied because of their respective ability
to specify and execute programs which exhibit a high
degree of parallel and/or asynchronous activity [12,
7]. This paper describes a comprehensive simulation
environment that allows for the execution and
monitoring of dataflow programs. One overall objective
of this facility was to meet the needs of researchers
in such diverse areas as computer architecture,
algorithm analysis, and language design and
implementation. Another objective was to accommodate
the semantics of several of the contending abstract
dataflow models [2, 4]. Additionally, it was desired to
enhance the abstract dataflow models which the
simulator would support. These objectives, combined
with the desired debugging and metering requirements,
directed the design of the overall system. A brief
introduction to dataflow and its related terminology is
given to assist the reader. A companion paper [6]
describes an augmentation to the basic simulation
facility presented here that allows for the execution
of dataflow programs on processors having finite
resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Langan:1979:SED,
author = "David D. Langan and Bruce D. Shriver",
title = "Simulated execution of dataflow programs on processors
having finite resources",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "141--149",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805455",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dataflow languages and processors are currently being
extensively studied because they provide for the
specification and realization of processes exhibiting a
high degree of parallel and/or asynchronous activity
[12, 8]. Several researchers have developed simulators
for specific candidate dataflow architectures in which
there are essentially an infinite number of resources
available to the host machine [9, 1]. This is done to
study the degree of parallelism which is achievable
with a given version of an algorithm. However, it is an
equally important (and neglected) area to study the
behavior of programs executing in candidate computer
systems having a finite amount of resources. This paper
presents results which have been obtained from such
modeling. It is shown that in such a system certain
``critical nodes'' must be given priority of execution
when competing with other nodes for the same resources
in order to achieve the maximum system throughput. It
is suggested that the abstract dataflow model be
modified to accommodate such situations. Various design
trade-offs associated with the implementation of the
simulator are discussed along with a description of
available features. A companion paper [6] describes the
general dataflow simulation facility which provided the
basis of this work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Unger:1979:OSI,
author = "Brian W. Unger and James R. Parker",
title = "An operating system implementation and simulation
language {(OASIS)}",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "151--161",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805456",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An approach to the implementation and simulation of
system software for multicomputer architectures is
described. OASIS, a variant of the SIMULA 67 language,
provides tools for both hardware modelling and system
software development. The latter includes an extensible
module type with flexible intermodule access control.
Hardware is characterized at the processor/memory level
so that system software resource control and allocation
policies can be implemented at a functional level.
Concurrent module execution by multiple processors,
with or without shared memory, can be simulated
directly. The OASIS modules in such a simulation can
closely parallel the structure of actual system
software. Thus, once a design is shown viable by
simulation, the implementation of actual software can
be a simple translation of OASIS modules. A brief
overview of OASIS features is presented followed by a
simple example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sanguinetti:1979:TIS,
author = "John Sanguinetti",
title = "A technique for integrating simulation and system
design",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "163--172",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805457",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A technique for simulating incomplete systems is given
which allows performance prediction during system
design. This technique, called integrated simulation,
allows the system design to itself be a simulation
model, thus avoiding the overhead of maintaining a
separate, valid simulation model for the system. The
paper presents integrated simulation in the framework
of a system modeling language called the Program
Process Modeling Language, PPML. This language provides
a means for describing systems of concurrent processes
in both abstract and explicit terms, thus lending
itself well to a top-down design method. In the design
process, any PPML representation of the system can be
simulated directly, from the most abstract design to
the completely elaborated system. Simulation of the
completely elaborated system is, in fact, simply the
system in execution. The paper defines PPML and
describes the techniques required to simulate PPML
systems given various underlying machines. It concludes
with a discussion of the limitations of the integrated
simulation method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Razouk:1979:EMS,
author = "Rami R. Razouk and Mary Vernon and Gerald Estrin",
title = "Evaluation methods in {SARA} --- the graph model
simulator",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "189--206",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805458",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The supported methodology evolving in the SARA (System
ARchitects' Apprentice) system creates a design
framework on which increasingly powerful analytical
tools are to be grafted. Control flow analyses and
program verification tools have shown promise. However,
in the realm of the complex systems which interest us
there is a great deal of research and development to be
done before we can count on the use of such powerful
tools. We must always be prepared to resort to
experiments for evaluation of proposed designs. This
paper describes a fundamental SARA tool, the graph
model simulator. During top-down refinement of a
design, the simulator is used to test consistency
between the levels of abstraction. During composition,
known building blocks are linked together and the
composite graph model is tested relative to the lowest
top-down model. Design of test environments is
integrated with the multilevel design process. The SARA
methodology is exemplified through design of a higher
level building block to do a simple FFT.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:1979:MSD,
author = "Stone H. Yu and Tadao Murata",
title = "Modeling and simulating data flow computations at
machine language level",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "207--213",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805459",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is concerned with the data flow
organization of computers and programs, which exhibits
a good deal of inherent concurrencies in a computation
by imposing no superfluous precedence constraints. In
view of the popularity of parallel and distributed
processing, this organization can be expected to play
an increasingly prominent role in the design and
development of computer systems. A schematic diagram
called DF-graphs, suitable for modeling data flow
computations at the machine language level, is
introduced. To facilitate the storage of DF-graphs in
computers, matrix equations which fully describe their
structure and their dynamic behaviors are developed as
an alternate representation. Also demonstrated is the
feasibility of simulating the execution of computations
specified by DF-graphs on a network of conventional
mini- and microprocessors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mattheyses:1979:MSA,
author = "R. M. Mattheyses and S. E. Conry",
title = "Models for specification and analysis of parallel
computing systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "215--224",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805460",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of designing a properly functioning
parallel hardware or software system is considerably
more difficult than that of designing a similar
sequential system. In this paper we formulate criteria
which a design methodology for parallel systems should
satisfy and explore the use of various models as the
basis for such a design tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gertner:1979:PEC,
author = "Ilya Gertner",
title = "Performance evaluation of communicating processes",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "241--248",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805461",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper concerns the performance evaluation of an
operating system based on communicating processes.
Processes communicate via messages and there is no
shared data. Execution of a program is abstracted as a
sequence of events to denote significant computational
steps. A finite state machine model of computation is
used for the specifications of abstract computational
properties and, thereafter, for the selective analysis
of measurement data. A set of conventions is developed
to characterize the performance of communicating
processes. A hierarchical layering technique is used to
concisely describe the characteristics of large
systems. A performance monitoring system was
implemented and applied to the analysis of RIG, a
message-based operating system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spooner:1979:BIS,
author = "Christopher R. Spooner",
title = "Benchmarking interactive systems: {Producing} the
software",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "249--257",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/800188.805462",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The author has recently developed a new methodology of
benchmarking, which is being applied to a procurement
in which (a) a single integrated interactive
application is to span a distributed configuration of
computing hardware, (b) the configuration is unknown
when the benchmark is being developed, and (c) the
application software will be written after the
benchmark has been run. The buyer prepares a simulation
model of the intended application in the form of
programs that will run on the hardware being
benchmarked. Each competing vendor is expected to tune
the performance of this model to the hardware
configuration that he has proposed, so he will require
several versions of the model. This presents the buyer
with a formidable software-production problem, which is
further complicated by a requirement for extreme
flexibility and reliability. The paper addresses the
software-production problem and describes its solution.
The solution was to develop an automated
code-production system based on two principal design
features. First, the model and its translator are both
written in the same language; secondly, the common
language is selected on the basis of readability and
extensibility. The paper examines why this approach to
the code-production problem was successful. Though the
code-production system was developed to support a
particular benchmarking approach, it should also be
useful in other modeling situations. Indeed it might be
of interest in any field where readability,
reliability, ease of maintenance, and economy of
programming effort are considered important.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dujmovic:1979:CCP,
author = "Jozo J. Dujmovi{\'c}",
title = "Criteria for computer performance analysis",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "259--267",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805463",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer evaluation, comparison, and selection is
essentially a decision process. The decision making is
based on a number of worth indicators, including
various computer performance indicators. The
performance indicators are obtained through the
computer performance measurement procedure.
Consequently, this procedure should be completely
conditioned by the decision process. This paper
investigates various aspects of computer performance
measurement and evaluation procedure within the context
of computer evaluation, comparison and selection
process based on the Logic Scoring of Preference
method. The set of elementary criteria for performance
evaluation is proposed and the corresponding set of
performance indicators is defined. The necessary
performance measurements are based on the standardized
set of synthetic benchmark programs and include three
separate measurements: monoprogramming performance
measurement, multiprogramming performance measurement,
and multiprogramming efficiency measurement. Using the
proposed elementary criteria, the measured performance
indicators can be transformed into elementary
preferences and aggregated with other non-performance
elementary preferences obtained through the evaluation
process. The applicability of presented elementary
criteria is illustrated by numerical examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
xxauthor = "Jozo J. Dujomovi{\'c}",
}
@Article{Dyal:1979:SBS,
author = "James O. Dyal and William {DeWald, Jr.}",
title = "Small business system performance analysis",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "269--275",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805464",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents results from the performance
simulation study of a small business-oriented computer
system. The system, SPERRY UNIVAC BC/7-700, is
commercially available in the configuration modeled and
in other higher performance models. All BC/7 systems
modeled are supported with highly interactive
applications software systems. The model is
parameterized to select one or more workstations and
one or more cartridge disks. File allocations are by
cylinder. Seek times are computed by remembering the
position of each movable arm. References are randomized
within each file, but the sequence in which files are
accessed is controlled by the application logic, in
conjunction with the number of line items/order. Most
event times are not constant, but the result of drawing
randomly against empirical distributions with specified
mean and standard deviation. For this study, the system
simulated is composed of a single work-station running
the highly interactive on-line version of a
sophisticated order entry application package.
Principal performance measures are system throughput
and response time, including operator action times. It
is found that, in the single workstation environment,
performance is very cost effective in this highly
competitive part of the information system market.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huff:1979:SCR,
author = "Robert W. Huff",
title = "System characterization of a {Retail Business
System}",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "277--284",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805465",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexities of Retail Business Systems today
require a thorough understanding of how functional
requirements impact desired system performance. It is
no longer feasible to discretely test and evaluate
individual system components without considering their
inter-relationship. The techniques described in this
presentation will define the method of system
characterization of products prior to customer
delivery. Three techniques are utilized to characterize
system performance --- simulation, stimulation, and
performance measurement. Simulation involves writing a
mathematical model which is enhanced from a product
feasibility model to a system configuration tool as a
result of stimulation and measurement activities.
Stimulation consists of using emulators to load the
system component under test as if the actual system is
inter-connected. The emulators are programmed to
produce a processing volume which can exceed the peak
benchmark of the potential user. Performance
measurement is accomplished during the stimulation
activity using hardware/software probes to monitor
specific system parameters. These monitors provide
vital information to determine total system capacity
and the expected system performance for a given
configuration. The information derived from system
characterization is invaluable in providing the
customer with a realistic expectation of system
capability to perform its present functions and in
projecting future growth potential.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stroebel:1979:FPA,
author = "Gary Stroebel",
title = "Field performance aids for {IBM GSD} systems",
journal = j-SIGMETRICS,
volume = "8",
number = "3",
pages = "285--291",
month = "Fall",
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1013608.805466",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:53:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A series of field performance aids have been developed
to assist IBM Systems Engineers evaluate the
performance of System/3, System/34, and System/38
configurations. Use of those aids is appropriate at
proposal time, for preinstallation design, for tuning,
and for upgrade studies. This paper overviews some of
the key features of these aids as they pertain to the
user interface, workload characterization, and
performance models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1979:FAP,
author = "Jon D. Clark",
title = "A feature analysis of performance evaluation texts",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "9--11",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Dowdy:1979:SWT,
author = "Lawrence W. Dowdy",
title = "Synopsis of workshop on the theory and application of
analytical models to {ADP} system performance
prediction",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "13--17",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Petrella:1979:SWS,
author = "Arthur Petrella and Harold Farrey",
title = "Simulating working sets under {MVS}",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "24--36",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pierson:1979:PEM,
author = "Daniel L. Pierson",
title = "Performance evaluation of a minicomputer-based data
collection system",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "37--44",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discussed the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Artis:1979:CPM,
author = "H. Pat Artis",
title = "Capacity planning for {MVS} computer systems",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "45--62",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajaraman:1979:PVM,
author = "M. K. Rajaraman",
title = "Performance of a virtual memory: some experimental
results",
journal = j-SIGMETRICS,
volume = "8",
number = "4",
pages = "63--68",
month = dec,
year = "1979",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041870",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:32 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports the results of simulation
experiment of a model of a virtual memory computer. The
model consists of three major subsystems: Program
Behavior, Memory Allocation and Secondary Storage. By
adapting existing models of these subsystems an overall
model for the computer operation is developed and its
performance is tested for various design alternatives.
The results are reported for different paging devices,
levels of multiprogramming, job mixes, memory
allocation scheme, page service scheduling and page
replacement rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1980:WWS,
author = "Peter J. Denning",
title = "What's a working set?",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "6--10",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041873",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I am writing about the paper by A. Petrella and H.
Farrey, of IBM, SIMULATING WORKING SETS UNDER MVS,
reprinted in the SIGMETRICS Newsletter, Issue (8, 4),
winter 1979-80. The paper is an amalgam of very good
modeling work and misinformation about the working set
concept. I will summarize the important contributions
and give a short essay about working sets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Petrella:1980:SWS,
author = "Arthur Petrella and Harold Farrey",
title = "Simulating working sets under {MVS}",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "11--23",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041874",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high
in curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1980:MEP,
author = "Mitchell G. Spiegel",
title = "Measuring and evaluating performance",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "33--34",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The subject of system performance measurement and
evaluation has undergone as many generations of changes
as the systems themselves. The problem of what to
measure and evaluate is complicated by the fact that
computing and communications, having become technically
similar (digital), will undergo further fusion. Because
the technologies are merging, a comparison of their
respective origins is instructive. Communications and
computing do not share a common history. Communications
performance evaluation began as a turn-of-the-century
issue. Important performance attributes of voice
communications systems were accessibility and
reliability. The general public and communications
system analysts always viewed the voice communications
systems as a bundled service, with little emphasis on
the characteristics of its individual components.
Performance was `engineered' into communications
systems for given workload capacity levels (traffic). A
reliable service offering evolved over two decades
(1920's and 1930's) and was expanded to include data as
well as voice communications. The voice network used
primarily analog transmission techniques, because voice
traffic grew far more rapidly than data. Pulse code
modulation (PCM) techniques, employing digital
transmission, reversed the trend of analog circuitry.
In the future, communications transmission, switching,
and integrated services networks (voice, data,
facsimile, picture) will be implemented exclusively
with digital techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dixon:1980:PMI,
author = "P. J. Dixon",
title = "Planning {MIS} investment and expense levels",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "35--37",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041876",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Asking for capital for data processing and
telecommunications equipment is not exactly popular
with most Boards of Directors in most companies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moran:1980:CPV,
author = "Thomas S. Moran",
title = "Capacity planning: `the volume'",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "38--40",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Some comments on past, present, and future measures of
volume as it affects planning for computer systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DeMarco:1980:BLB,
author = "Tom DeMarco",
title = "Breaking the language barrier",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "41--45",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041878",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The systems analyst and the user are not independent
entities; each depends on the other. When communication
problems get in their way, however, the relationship
can turn adversary. The real problem in most system or
program development efforts may be that English, left
to itself, is too subtle, too open to personal
interpretation, to be appropriate in the structured
world of DP. Tom DeMarco shows how to impose limits on
our native language so analysts, designers, programmers
and users can safely use it to define what they are
trying to develop. This week he starts by giving some
hints on that most basic of DP jobs, setting up the
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Giles:1980:CSM,
author = "Howard L. Giles",
title = "Communications systems management",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "46--51",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041879",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As data processing systems have grown from primarily
batch-oriented applications to today's fairly extensive
on-line systems, the management system required to
control these resources has changed. This system
evolution is forcing management to focus their
attention on controlling the distribution of
information to various users performing many diverse
applications. Communications Systems Management is the
process used to manage and control the distribution of
information in an on-line system for maximum
performance and productivity. It consists of those
techniques and tools needed to operate, maintain,
repair, install and plan for the continuous operation
of a communications-oriented information system. The
following pages describe the management functions
needed to ensure that on-line system operation will be
successful.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Erlandson:1980:SEM,
author = "Robert F. Erlandson",
title = "System evaluation methodologies: combined
multidimensional scaling and ordering techniques",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "52--58",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is a difficult task to evaluate existing
large-scale systems; it is even more difficult to
evaluate alternative designs for future systems. Yet,
such decisions are necessary because of the long
development and implementation times involved.
Decisions must be made today about future systems for
telecommunications, power, health-care delivery,
transportation, etc. These systems change slowly
because additions or modifications are costly and must
mesh with the existing elements, hence, great care must
be given to the establishment of long-term goals and
the evaluation of alternative future system designs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pearson:1980:MCU,
author = "Sammy W. Pearson and James E. Bailey",
title = "Measurement of computer user satisfaction",
journal = j-SIGMETRICS,
volume = "9",
number = "1",
pages = "59--68",
month = "Spring",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041872.1041881",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:40 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents the development and evaluation of
a questionnaire designed to quantitatively measure
computer user satisfaction. The administration,
scoring, and interpretation of the questionnaire are
also addressed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chandy:1980:CAP,
author = "K. Mani Chandy and Charles H. Sauer",
title = "Computational algorithms for product form queueing
networks",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "1--1",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806144",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the last two decades there has been special
interest in queueing networks with a product form
solution. These have been widely used as models of
computer systems and communication networks. Two new
computational algorithms for product form networks are
presented. A comprehensive treatment of these
algorithms and the two important existing algorithms,
convolution and mean value analysis, is given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Performance evaluation; Product form; Queueing
networks",
}
@Article{Gordon:1980:ICP,
author = "Karen D. Gordon and Lawrence W. Dowdy",
title = "The impact of certain parameter estimation errors in
queueing network models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "3--9",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806145",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The effect that parameter estimation errors have on
performance in closed product form queueing networks is
investigated. In particular, the effects of errors in
the relative utilization estimates of the servers are
analyzed. It is shown that in single class load
independent networks, the resulting errors in
throughput and utilizations are of approximately the
same percentage as the errors in the relative
utilization estimates. This result does not hold in
networks with load dependent servers or multiple
customer classes. The percentage errors in mean queue
length depend upon the degree of multiprogramming in
the network. Errors in mean queue lengths can become
unbounded as the degree of multiprogramming becomes
unbounded. Implications of these results to computer
system modeling are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blake:1980:XIM,
author = "Russ Blake",
title = "{XRAY}: {Instrumentation} for multiple computers",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "11--25",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "XRAY presents a global view of the performance of
hardware and software components on multiple,
distributed computers. The set of components chosen for
measurement can be changed at any time throughout a
network of systems, and can be selected to minimize
data collection time and measurement space. In the
course of normal activities the operating system
executes firmware which increments counters for the
measured components. Periodically, the counters are
recorded in an ordinary file by a process in each
processor. An analysis program permits browsing through
components and plotting counters in real time. Analysis
focuses on detecting the distributed sources of
excessive activity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1980:DDA,
author = "James H. Hughes",
title = "{DIAMOND} a digital analyzer and monitoring device",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "27--34",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806147",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the design and application of a
special purpose computer system. It was developed as an
internal tool by a computer manufacturer, and has been
used in solving a variety of measurement problems
encountered in computer performance evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bashioum:1980:BIS,
author = "Douglas L. Bashioum",
title = "Benchmarking interactive systems: {Calibrating} the
model",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "35--41",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A methodology for benchmarking dedicated, interactive
systems has been developed at The MITRE Corporation.
This methodology uses a synthetic program model of the
application which runs on the proposed
hardware/operating system configurations and is driven
by a statistically derived load. System performance is
measured by analyzing the synthetic transaction
response times. The methodology yields assurances to a
buyer that the benchmarked system has at least an a
priori defined amount of computer power available for
applications-oriented software. This paper examines the
methodology and the problems that were encountered and
solutions which have been used in calibrating a
benchmark model for a specific application. The
benchmark was designed to model a large interactive
information processing application on a procurement
requiring loosely-coupled (no shared memory)
multicomputer systems. The model consists of a set of
interacting synthetic program cells, each composed of
several abstractly defined components. The model is
maintained in a very high level language that is
automatically translated into a standard High Order
Language (typically FORTRAN or COBOL) for delivery to
the competing vendors. These delivered model cells
contain automatically generated size and time filler
code that ``calibrate'' the cells to consume the
appropriate CPU time and memory space as defined by the
abstract size units after accounting for each vendor's
hardware and proposed system design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Benchmark; Calibration; Computer performance
measurement; Distributed processing; Interactive
systems; Modeling; Real-time; Simulation; Synthetic
programs",
}
@Article{Lehmann:1980:PEP,
author = "Axel Lehmann",
title = "Performance evaluation and prediction of storage
hierarchies",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "43--54",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806149",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes a modelling methodology combining
simulation and analysis for computer performance
evaluation and prediction. The methodology is based on
a special workload model that is suitable for the
generation and description of dynamic program
behaviour. A description of this workload model is
given in section 2. The applicability of this concept
with respect to the design of new storage systems, as
well as the improvement or comparison of existing
systems, will be described by investigation of the
efficiency of small cache memories in section 3.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alanko:1980:MER,
author = "Timo O. Alanko and Ilkka J. Haikala and Petri H.
Kutvonen",
title = "Methodology and empirical results of program behaviour
measurements",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "55--66",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Program behaviour characteristics were examined using
data gathered from real program executions. Experiments
were performed in a segmented virtual memory with a
working set policy; the analyzing costs were kept low
using an efficient data reduction method. Empirical
results were obtained concerning the influence of the
window size on program behaviour characteristics, the
accuracy of some average working set size
approximations and the sensitivity of program behaviour
to the program's input data. These results show that
some commonly used assumptions concerning program
behaviour are inaccurate. Also there seem to exist
``ill-behaving'' programs, the behaviour of which does
not correspond well with results obtained earlier. The
effects of real-time delays during program execution
were considered using a new simple method. As an
additional experiment, segmenting and paging were
compared using various performance statistics; the
results seem to favour segmenting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumar:1980:PRB,
author = "Gopa Kumar and C. Thomas Nute",
title = "Program restructuring for block structured languages",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "67--79",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806151",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Prior studies of program restructuring to increase the
degree of locality of a program in a paged virtual
memory system were restricted to statically allocated
codes only. This work develops a restructuring
methodology for block structured languages like Algol,
with dynamic memory allocation. We subsequently
restructure and analyze different classes of programs
using this methodology and study the performance gains
realized with different restructuring heuristics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vantilborgh:1980:NCD,
author = "Hendrik T. Vantilborgh and Richard L. Garner and
Edward D. Lazowska",
title = "Near-complete decomposability of queueing networks
with clusters of strongly interacting servers",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "81--92",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806152",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The near-complete decomposability of queueing network
models of computer systems is generally supported by
very large differences in the service rates of the
servers. In this paper we show how such models may
still be nearly completely decomposable if on the one
hand these large differences can no longer be
realistically assumed (as is the case, for example, in
computer networks) but if on the other hand clusters of
strongly interacting servers exist. Our results may be
viewed as a bridge between the approaches to the
approximate analysis of queueing networks advanced by
Courtois and by Chandy, Herzog and Woo, since we show
circumstances under which the former approach leads to
exactly the same method of analysis as the latter. In
contrast to the Chandy, Herzog and Woo theorem,
however, the theory of near-complete decomposability
does not rely on the beneficent properties of queueing
networks exhibiting product form solutions. Thus our
results may point the way towards the theoretically
sound application of simple and intuitively appealing
approximate analysis techniques to non-product-form
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwajn:1980:FRE,
author = "Alexandre Brandwajn",
title = "Further results on equivalence and decomposition in
queueing network models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "93--104",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806153",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses three aspects related to the
notion of exact equivalence in queueing models. In many
cases the parameters of a system equivalent to a given
model involve only a small subset of conditional
probabilities of the state of the original model given
the equivalent one. It is shown that meaningful bounds
may be obtained for the conditional probabilities of
interest with little computational effort. Such bounds
are useful in assessing processing capacities as well
as the accuracy of approximate solutions. As a second
point it is shown that the notion of exact equivalence
may be easily extended to networks with non-exponential
servers. This is done for both the methods of
supplementary variables and for the embedded Markov
chain technique. Qualitative analysis of approximation
methods is also discussed. Finally, numerical methods
based on the notion of exact equivalence, i.e.
operating on conditional probabilities, are
considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stewart:1980:ECF,
author = "William J. Stewart and Gerald A. Zeiszler",
title = "On the existence of composite flow equivalent
{Markovian} servers",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "105--116",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing networks have been used to model a large
variety of complex systems. However, once a realistic
model has been constructed it has generally been
necessary to distort and modify it so that an analytic
solution could be obtained. Unfortunately, the analytic
solution often has little relation to the original
queueing system and consequently often produces
solutions with poor accuracy. We begin with a brief
introduction to the concepts of decomposition and
aggregation. Application of these and other approximate
methods to the analysis of computer systems are
discussed by Chandy and Sauer [CHAN78].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marie:1980:CEP,
author = "Raymond Marie",
title = "Calculating equilibrium probabilities for {$ \lambda
(n) / C_k / 1 / N $} queues",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "117--125",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806155",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Equilibrium state distributions are determined for
queues with load-dependent Poisson arrivals and service
time distributions representable by Cox's generalized
method of stages. The solution is obtained by
identifying a birth-death process that has the same
equilibrium state distribution as the original queue.
Special cases of two-stage ({$C_2$}) and Erlang-$k$ ({$E_k$})
service processes permit particularly efficient
algorithms for calculating the load-dependent service
rates of the birth-death process corresponding to the
original queue. Knowing the parameters of the
birth-death process, the equilibrium state
probabilities can be calculated straightforwardly.
This technique is particularly useful when subsystems
are reduced to flow-equivalent servers representing the
complementary network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wagner:1980:HCS,
author = "Robert A. Wagner and Kishor S. Trivedi",
title = "Hardware configuration selection through discretizing
a continuous variable solution",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "127--142",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper extends a previous model for computer
system configuration planning developed by the authors.
The problem is to optimally select the CPU speed, the
device capacities, and file assignments so as to
maximize throughput subject to a fixed cost constraint.
We advocate solving this essentially discrete problem
in continuous variables followed by an appropriate
discretization. The discretization error thus committed
is analyzed in detail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bard:1980:MSD,
author = "Yonathan Bard",
title = "A model of shared {DASD} and multipathing",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "143--143",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806157",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a model of an I/O subsystem in
which devices can be accessed from multiple CPUs and/or
via alternative channel and control unit paths. The
model estimates access response times, given access
rates for all CPU-device combinations. The systems
treated are those having the IBM System/370
architecture, with each path consisting of a CPU,
channel, control unit, head of string, and device with
rotational position sensing. The path selected for an
access at seek initiation time remains in effect for
the entire channel program. The computation proceeds in
three stages: First, the feasibility of the prescribed
access rates is determined by solving a linear
programming problem. Second, the splitting of access
rates among the available paths is determined so as to
satisfy the following principle: The probability of
selecting a given path is proportional to the
probability that the path is free. This condition leads
to a set of nonlinear equations, which can be solved by
means of the Newton--Raphson method. Third, the RPS hit
probability, i.e. the probability that the path is free
when the device is ready to transmit, is computed in
the following manner: From the point of view of the
selected path, the system may be viewed as being in one
of 25 possible states. There are twelve different
subsets of states whose aggregate probabilities can be
computed from the (by now) known flow rates over the
various paths. The maximum entropy principle is used to
calculate the unknown state probabilities, with the
known aggregate probabilities acting as constraints.
The required RPS hit probability can be computed easily
once the state probabilities have been determined.
Explicit formulas are given for all these quantities.
Empirically derived formulas are used to compute the
RPS miss probability on subsequent revolutions, given
the probability on the first revolution. The model is
validated against a simulator, showing excellent
agreement for systems with path utilizations up to 50
percent. The model is also validated against
measurements from a real three-CPU system with 31
shared devices. In this validation, the I/O subsystem
model acts as a common submodel to three copies of a
system model, one for each CPU. Estimated end-user
transaction response times show excellent agreement
with the live measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lo:1980:CCP,
author = "T. L. Lo",
title = "Computer capacity planning using queueing network
models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "145--152",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806158",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents several computer capacity planning
case studies using a modeling tool, BEST/1, derived
from the theory of queueing networks. All performance
predictions were evaluated based on the selected
service levels such as response times and throughputs.
Advantages and disadvantages of using the modeling
approach are also briefly discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kurinckx:1980:OVC,
author = "A. Kurinckx and G. Pujolle",
title = "Overallocation in a virtual circuit computer network",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "153--158",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806159",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study the end-to-end control through
virtual circuits in a computer network built following
the X.25 Recommendations. We develop a mathematical
model to obtain the maximum overallocation of node
buffers, in order for the probability of overflow not
to exceed a given value.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Upton:1980:ADA,
author = "Richard A. Upton and Satish K. Tripathi",
title = "Analysis of design alternatives for a packet switched
{I/O} system",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "159--171",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806160",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an application of analytical
modeling to the design and evaluation of a general
purpose, packet-switched image processing system that
will soon enter an implementation phase. A bottom-up
modeling approach is used to evaluate such design
issues as optimal packet size, optimal channel access
method(s), and required number of processors and disks.
Based on the characteristics of various hardware
components and the predicted workload, specific design
recommendations are made.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balkovich:1980:PDS,
author = "Edward E. Balkovich and Colin Whitby-Strevens",
title = "On the performance of decentralized software",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "173--180",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806161",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distribution of computing to achieve goals such as
enhanced reliability depends on the use of decentralized
software. Decentralization typically replaces a
sequential process by a system of small, concurrent
processes that interact frequently. The implementation
of processes and their interactions represents a cost
incurred as a result of decentralization. Performance
measurements are reported in this paper for
decentralized software written in a programming
language for distributed computer systems. These
performance measurements confirm that low-cost
implementations of concurrency are possible, but
indicate that decentralized software makes heavy use of
run-time functions managing concurrency. An initial
model comparing the performance of a specific
decentralized software structure to its centralized
counterpart indicates that these implementation costs
are generally offset by the performance improvements
that are due to the parallelism inherent in the
decentralized structure. The research facilities for
continued study of decentralized software performance
are described in the summary.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Concurrent software; Decentralized control;
Decentralized software; Distributed computer systems;
Performance measurement and evaluation",
}
@Article{Grit:1980:PMA,
author = "Dale H. Grit and Rex L. Page",
title = "Performance of a multiprocessor for applicative
programs",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "181--189",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806162",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Applicative programming languages provide
opportunities for parallel processing without requiring
the programmer to be concerned with explicit
synchronization of portions of the computation. We
present a computational model of a multiprocessor which
executes applicative programs, and we analyze the
expected performance of the model via simulation. As
the number of processors is doubled, elapsed execution
time is nearly halved, until system bottlenecks occur.
An alternative model is proposed which alleviates these
bottlenecks. The basis of the second model is an
interconnection switch which is characterized by $ \log
(n) $ access time and $ n \log (n) $ cost.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dhas:1980:PEF,
author = "C. Retna Dhas",
title = "Performance evaluation of a feedback data flow
processor using simulation",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "191--197",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a method to estimate the
performance of a feedback data flow processor using
software simulation. A brief overview of a data
language and a data flow processor along with the
conceptual view of a software simulator are described.
Numerical measurements of parallelism and resources
requirements are obtained by translating high level
language programs to data flow language and then
executing them on the simulator.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bryant:1980:HMG,
author = "Raymond M. Bryant",
title = "On homogeneity in {M\slash G\slash 1} queueing
systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "199--208",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Operational analysis replaces certain classical
queueing theory assumptions with the condition of
``homogeneous service times.'' In this paper, we show
that the sample paths of an M/G/1 queueing system have
this property with non-zero probability if and only if
the service time distribution is exponential. We also
consider the relationship of the operational
performance measures S(n) and the mean service time.
This relationship is shown to depend on the form of the
service distribution. It follows that using operational
analysis to predict the performance of an M/G/1
queueing system when the mean service time is changed
will be most successful when the service time
distribution is exponential. Simulation evidence is
presented which supports this claim.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:1980:ORP,
author = "E. G. {Coffman, Jr.} and Erol Gelenbe and Roger C.
Wood",
title = "Optimal replication of parallel-read, sequential-write
systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "209--216",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Frequently used computer elements that can be written
by at most one process at a time constitute important
bottlenecks in multiprocessor system operation,
particularly when such elements are accessible only
serially. Hardware devices, data files, system tables
and critical sections in general may be examples of
such elements. One common way to relieve this
congestion is to provide several copies of the element,
which can then be read (used) in parallel. However, the
requirement that writing (changing) remain sequential
means that writing times increase with the number of
copies provided. The optimization question in this
trade-off is the main concern of this paper. A
probability model of such a system is formulated with
the objective of obtaining read-rate capacities as a
function of read/write loads and the number of copies
provided. The above optimization problem is expressed
in terms of these results and then solved. In
particular, it is shown how to select the number of
copies that maximizes the read-rate capacity for given
system parameters. Two distinct operating regimes,
based on how interrupted read operations are restarted,
are analyzed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shore:1980:LRO,
author = "John E. Shore",
title = "The lazy repairman and other models: {Performance}
collapse due to overhead in simple, single-server
queuing systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "217--224",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider two simple models of overhead in batch
computer systems and demand access communications
systems. The first, termed ``modified M/M/1/K,'' is an
exponential, single-server queuing system with finite
storage capacity, constant arrival rate, and
queue-length-dependent service time. We consider cases
in which the expected service time consists of a
constant plus a term that grows linearly or
logarithmically with the queue length. We show that the
performance of this system --- as characterized by the
expected number of customers in the system, the
expected time in the system, and the rate of missed
customers --- can collapse as the result of small
changes in the arrival rate, the overhead rate, or the
queue capacity. The system has the interesting property
that increasing the queue capacity can decrease
performance. In addition to equilibrium results, we
consider the dynamic behavior of the model. We show
that the system tends to operate in either of two
quasi-stable modes of operation --- one with low queue
lengths and one with high queue lengths. System
behavior is characterized by long periods of operation
in both modes with abrupt transitions between them. We
point out that the performance of a saturated system
may be improved by dynamic operating procedures that
return the system to the low mode. In the second model,
termed the ``lazy repairman,'' the single server has
two distinct states: the ``busy'' state and the
``lazy'' state. Customers receive service only when the
server is in the busy state; overhead is modeled by
attributing time spent in the lazy state to overhead
functions. When the expected time spent in the lazy
state increases with the number of customers waiting
for service, the behavior of the lazy repairman model
is similar to the modified M/M/1/K, although the lazy
repairman model makes it easier to study in detail the
effects of overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:1980:RTD,
author = "Simon S. Lam and A. Udaya Shankar",
title = "Response time distributions for a multi-class queue
with feedback",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "225--234",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806167",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A single server queue with feedback and multiple
customer classes is analyzed. Arrival processes are
independent Poisson processes. Each round of service is
exponentially distributed. After receiving a round of
service, a customer may depart or rejoin the end of the
queue for more service. The number of rounds of service
required by a customer is a random variable with a
general distribution. Our main contribution is
characterization of response time distributions for the
customer classes. Our results generalize in some
respects previous analyses of processor-sharing models.
They also represent initial efforts to understand
response time behavior along paths with loops in local
balanced queueing networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1980:AIO,
author = "Y. T. Wang",
title = "Analysis of an intrinsic overload control for a class
of queueing systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "235--243",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806168",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a priority queueing system which consists
of two queues sharing a processor and in which there is
delayed feedback. Such a model arises from systems
which employ a priority assignment scheme to achieve
overload control. An analytic expression for the
stationary probability of the queue lengths is derived.
An algorithm is proposed to compute the queue lengths
distribution. Some numerical results are illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1980:ASD,
author = "Connie Smith and J. C. Browne",
title = "Aspects of software design analysis: {Concurrency} and
blocking",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "245--253",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806169",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper extends previous work on development of a
methodology for the prediction of the performance of
computer software systems from design level
specifications and continuing through implementation.
The effects of synchronized behavior, such as results
from data reservation in multi-thread executions of
data base systems, and competition for host system
resources are incorporated. The previous methodology
uses hierarchical graphs to represent the execution of
software on some host computer system (or on some
abstract machine). Performance metrics such as response
time were obtained from analysis of these graphs
assuming execution of a single copy on a dedicated
host. This paper discusses the mapping of these
execution graphs upon queueing network models of the
host computing environment to yield performance metric
estimates for more complex and realistic processing
environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Potier:1980:ALP,
author = "D. Potier and Ph. Leblanc",
title = "Analysis of locking policies in database management
systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "255--255",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806170",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Quantitative analysis of locking mechanisms and of
their impact on the performance of transactional
systems has so far received relatively little attention.
Although numerous concurrency mechanisms have been
proposed and implemented, there is an obvious lack of
experimental as well as analytical studies of their
behaviour and their influence on system performance. We
present in this paper an analytical framework for the
performance analysis of locking mechanisms in
transactional systems based on hierarchical analytical
modelling. Three levels of modelling are considered: at
level 1, the different stages (lock request, execution,
blocking) that transactions go through during their
life-time are described; the organization and
operations of the CPU and I/O resources are analysed at
level 2; transaction's behaviour during their lock
request phase is analysed at modelling level 3. This
hierarchical approach is applied to the analysis of a
physical locking scheme involving a static lock
acquisition policy. A simple probabilistic model of the
transaction behaviour is used to derive the
probability that a new transaction is granted the locks
it requests given the number of transactions already
active as a function of the granularity of the
database. On the other hand, the multiprogramming
effect due to the sharing of CPU and I/O resources by
transactions is analysed using the standard queueing
network approaches and the solution package QNAP. In a
final step, the results on the blocking probabilities
and the multiprogramming effect are used as input of a
global performance model of the transactional system.
Markovian analysis is used to solve this model and to
obtain the throughput of the system as a function of
the data base granularity and other parameters. The
results obtained provide a clear understanding of the
various factors which determine the global performance,
of their role and importance. They also raise many new
issues which can only be solved by further extensive
experimental and analytical studies and show that two
particular topics deserve special attention: the
modelling of transaction behaviour and the modelling of
locking overheads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:1980:ONC,
author = "E. G. {Coffman, Jr.} and E. Gelenbe and B. Plateau",
title = "Optimization of the number of copies in a distributed
data base",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "257--263",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806171",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the effect on system performance of the
distribution of a data base in the form of multiple
copies at distinct sites. The purpose of our analysis
is to determine the gain in READ throughput that can be
obtained in the presence of consistency preserving
algorithms that have to be implemented when UPDATE
operations are carried out on each copy. We show that
READ throughput diminishes if the number of copies
exceeds an optimal value. The theoretical model we
develop is applied to a system in which consistency is
preserved through the use of Ellis's ring algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ruschitzka:1980:RJC,
author = "Manfred Ruschitzka",
title = "The response of job classes with distinct policy
functions ({Extended Abstract})",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "265--265",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806172",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Policy function schedulers provide a flexible
framework for implementing a wide range of different
scheduling schemes. In such schedulers, the priority of
a job at any instant in time is defined by the
difference between the time it spent in the system and
an arbitrary function of its attained service time. The
latter is called the policy function and acts as the
functional parameter that specifies a particular
scheduling scheme. For instance, a constant policy
function specifies the first-come, first-serve
scheduling scheme. By changing the policy function, the
system behavior can be adjusted to better conform with
desired response characteristics. It is common to
express response characteristics in terms of a response
function, the average response time of a job
conditioned on its service requirement in equilibrium.
In this paper, we analyze processor-sharing M/G/1
systems in which the priorities of different classes of
jobs are determined by distinct policy functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:1980:PTO,
author = "K. H. Kim and Mahmoud Naghibzadeh",
title = "Prevention of task overruns in real-time
non-preemptive multiprogramming systems",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "267--276",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1009375.806173",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Real-time multiprogramming systems, in which a
hardware processor is dynamically assigned to run
multiple software processes each designed to control an
important device (user), are considered. Each software
process executes a task in response to a service
request repeatedly coming from the corresponding user.
Each service task is associated with a strict deadline,
and thus the design problem that we are concerned with
is to ensure that the service tasks requested can
always be executed within the associated deadlines,
i.e., no task overrun occurs. This problem was studied
by several investigators for the cases where preemptive
scheduling strategies are used. In contrast, very few
studies have been conducted for cases of non-preemptive
scheduling. In this paper we show that a non-preemptive
strategy, called relative urgency non-preemptive (RUNP)
strategy, is optimal in the sense that if a system runs
without a task overrun under any non-preemptive
strategy, it will also run without a task overrun under
the RUNP strategy. Then an efficient procedure used at
the design time for detecting the possibility of a task
overrun in a system using the RUNP strategy is
presented. The procedure is useful in designing
overrun-free real-time multiprogramming systems that
yield high processor utilizations. Some special types
of systems using the RUNP strategy for which even
simpler detection procedures are available are also
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Non-preemptive scheduling; Real-time multiprogramming;
Relative urgency; Task overrun; Time critical process",
}
@Article{King:1980:NMI,
author = "P. J. B. King and I. Mitrani",
title = "Numerical methods for infinite {Markov} processes",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "277--282",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806174",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The estimation of steady state probability
distributions of discrete Markov processes with
infinite state spaces by numerical methods is
investigated. The aim is to find a method applicable to
a wide class of problems with a minimum of prior
analysis. A general method of numbering discrete states
in infinite domains is developed and used to map the
discrete state spaces of Markov processes into the
positive integers, for the purpose of applying standard
numerical techniques. A method based on a little used
theoretical result is proposed and is compared with two
other algorithms previously used for finite state space
Markov processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fayolle:1980:SCT,
author = "G. Fayolle and P. J. B. King and I. Mitrani",
title = "The solution of certain two-dimensional {Markov}
models",
journal = j-SIGMETRICS,
volume = "9",
number = "2",
pages = "283--289",
month = "Summer",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/800199.806175",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:54:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class of two-dimensional Birth-and-Death processes,
with applications in many modelling problems, is
defined and analysed in the steady-state. These are
processes whose instantaneous transition rates are
state-dependent in a restricted way. Generating
functions for the steady-state distribution are
obtained by solving a functional equation in two
variables. That solution method lends itself readily to
numerical implementation. Some aspects of the numerical
solution are discussed, using a particular model as an
example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1980:EIE,
author = "Jon D. Clark and Robert M. Golladay",
title = "Empirical investigation of the effectiveness of
several computer performance evaluation tools",
journal = j-SIGMETRICS,
volume = "9",
number = "3",
pages = "31--36",
month = "Fall",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041883.1041884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of tools exist for computer selection
evaluation. The operational cost of applying these vary
considerably as does the precision of the performance
prediction. This paper compares the precision of
several commonly used methods in a single test case,
namely cycle time, instruction mix analysis and
benchmarking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmark; computer; cycle time; instruction mix;
performance evaluation",
}
@Article{Estell:1980:BW,
author = "Robert G. Estell",
title = "Benchmarks and watermarks",
journal = j-SIGMETRICS,
volume = "9",
number = "3",
pages = "39--44",
month = "Fall",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041883.1041885",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Historically, benchmark tests have been one of several
ways to size a computer system, and measure its
performance. Today, it is more important to test the
System Software than the machine hardware. (Thus the
term `watermark' (as on bond paper) for software
tests.) Watermarks of software suffer the same
limitations and risks as benchmarks of hardware: e.g.,
they should be supplemented with simulations, models,
and other analysis and design tools of our trade.
Perhaps most significantly, watermarks, like
benchmarks, can be biased by their creators.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kleijnen:1980:SMM,
author = "J. P. C. Kleijnen",
title = "Scoring methods, multiple criteria, and utility
analysis",
journal = j-SIGMETRICS,
volume = "9",
number = "3",
pages = "45--56",
month = "Fall",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041883.1041886",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scoring methods are popular in computer selection, and
try to combine different attributes into an overall
performance measure. Related is the multi-criteria
evaluation of computerized information systems. The
scoring method is criticized in the context of more
general utility models, popular in economics. Scoring
provides simplistic choice models, and should not be
used as predictive, causal models. Many references for
further study are included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1980:TTI,
author = "Peter J. Denning",
title = "A tale of two islands: a fable",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "7--10",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041889",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Once upon a time there were two islands. One was
called Stochasia. Its citizens were well cultured and
they had achieved high development in a system of
mathematics for random processes. The other island was
called Operatia. Its citizens were well cultured and
they had achieved high development in a system for
experimentation with nondeterminate phenomena. Both
civilizations were closed societies. Neither knew of
the other's existence, and it had been so since the
beginning of time. Neither would ever have known, had
it not been for the events I will describe shortly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yader:1980:ACP,
author = "Mark J. Yader",
title = "{ADP} capacity planning: a case study",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "11--25",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041890",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A case study of short-range ADP capacity planning is
presented and related to the process of long-range
planning. Short-range capacity planning is concerned
with identification of computer and communication
resources which will reach saturation levels in the
near future. The initial step in the short-range
planning process is to evaluate the performance of the
user's current system configuration and one or more
configuration enhancements with respect to their
effectiveness in supporting a projected workload.
Central to long-range planning is the evaluation of a
broader range of architectural alternatives, including
various distributed processing designs. In both short
range and long range planning, system modeling is a
basic tool for evaluating alternatives. An analytic
network of queues model has been developed to reflect
both centralized and hierarchically distributed network
architectures. The application of the tool as part of
the short-range case study is described.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marrevee:1980:HFF,
author = "J. Marrev{\'e}e",
title = "How friendly and fast is {FAST DUMP RESTORE}",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "28--35",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "FAST DUMP RESTORE, shortly named FDR, is a very well
known software package, delivered by the software house
Innovation Data Processing, and in some countries of
Europe commercially supported by Westinghouse. This
package is used in many computer centres using one of
IBM's big operating systems e.g. MVT or MVS. According
to Innovation's own remarks it became one of the most
successful software products in the world with about
3000 users, and since 1974 it is every year on the
DATAPRO HONOR ROLL. It should, among others, provide
superior performance on creation of dumps or restores
of disk packs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bernard:1980:SUM,
author = "J. C. Bernard",
title = "{T-scan}: the use of micro computers for response time
measurements",
journal = j-SIGMETRICS,
volume = "9",
number = "4",
pages = "39--50",
month = "Winter",
year = "1980",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041888.1041892",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:55:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "All large computer centers are actually faced with a
major change in their workload. Most applications are
leaving batch operations for time sharing ease of use.
In fact, all kinds of computer work could be performed
through a console: development, maintenance, data base
query and update and even batch control and submit. A
second problem arises as end-user profile is no more
computer oriented. Users only look at the time the
system needs to answer their requests, and don't care
about the computer game. So performance analysts and
operations managers are supposed to achieve a certain
level of service which they are almost unable to
measure. We try in this paper to discuss some major
problems related to conversational computer operations.
We will present several drawbacks characterising the
currently existing solutions. A problem that led us to
define simple operating principle for response time
measurements. This principle is implemented in a fully
automatic measurement tool named T-SCAN",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bell:1981:SLC,
author = "Thomas E. Bell",
title = "Structured life-cycle assumptions",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "1--3",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New programmers, some managers, and lots of users
don't understand the advantages of a structured
software life-cycle. However, only a single experience
with coding while designing will convince any incipient
software engineer that a controlled process is needed
from the time of system concept through the last
maintenance phase. Software Configuration Management
has become almost a religion, and EDP auditors have
even encountered a few systems that appear to have been
specified, then designed, then implemented, then
tested, and finally installed --- all before
maintenance and redefinition occurred. Perhaps the
millennium has finally arrived, and software people
will soon live in a controlled world with rational
practices. If you are tempted to believe the foregoing
prediction, read the latest issue of FORTUNE, the WALL
STREET JOURNAL, or COMMERCE BUSINESS DAILY and note a
few problems that may divert us from the path to
Nirvana. Data Processing supports commercial,
educational, industrial, and governmental activities
that are frequently (and repeatedly) redirected. Under
circumstances of a largely random environment with
thorough business planning a rarity, a critical support
activity can expect to be redirected frequently. New
ideas will be sliced into partly-completed DP
projects, and users' ``analytical analyses'' will become
DP systems as if by magic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coughlin:1981:SDM,
author = "Donald T. Coughlin",
title = "System development methodology or system research
methodology?",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "5--6",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A future data processing historian may someday point
to the 1970s as the decade when business application
systems began their adolescent growth period. We
entered the 1970s with few truly on-line business
systems, and many application designers did not fully
appreciate the capabilities and limitations of index
sequential file structures. Many of the larger
companies were busy writing their own tp monitors and
file handling systems, and it is very possible that
more professional hours were being devoted to the
development of control program software than to
applications software. The last decade did provide the
application programmer with new control program tools
such as data base management systems and on-line
terminal control software. It also generated a
continuing demand for computer performance software
specialists to tune application systems immediately
after initial implementation. These performance tuning
efforts often required substantial changes to the
application system --- not just program code but also
basic redesign. Therefore were these really system
development projects or were they system research
projects?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Earle:1981:AAB,
author = "Dennis M. Earle",
title = "An alchemical approach to brokerage",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "7--8",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The essence of the commodities business is the ability
to react quickly to evolving market conditions.
Mocatta, a N.Y. based bullion dealer, is a firm which
uses its Data Processing to provide both front office
(trading) flexibility and back-office capacity to
handle large volume days. The business is characterized
by the constant trade-off of time against money.
Corporate philosophy is to spend money to react quickly
rather than to react slowly but perhaps at lower costs.
The life cycle of a system in this environment normally
begins with a marketing report reflecting a new market
niche which the firm can take advantage of. Data
Processing is involved almost from the inception of the
idea to provide an indication of what existing systems
can do for this new opportunity. Because of the nature
of the business, each new product offered is usually so
unique as to make it impossible for existing systems to
support a new product from a trading point of view.
Back-office applications are somewhat more common
across products, so existing systems can usually
provide some support. The key point is that all we
really know is that we want to market the new product.
Some idea of the time frame in which the product is to
be offered is also obtained. The exact workings of
defining the product and determining the parameters
under which it will be traded usually remain to be
worked out prior to the offering date. This therefore
means that we have, at the point of commitment, the
necessity for evolving data processing support in the
same time frame in which the definition is evolving
about what it is that we are to support.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1981:PAI,
author = "Mitchell G. Spiegel",
title = "Prototyping: an approach to information and
communication system design",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "9--19",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes prototyping, a state-of-the-art
methodology to assist a design team in making a thorough
definition and analysis of new requirements,
feasibility, alternative selections, workload impact,
system and/or application specification,
implementation, and testing. Suggested prototype tools
and techniques are presented, and guidance is included
to aid a design team in obtaining accurate and timely
results. This paper is not intended to be a complete
text on design. It should be enhanced with a design
team's expertise, consultation from sources with design
experience, and reference to other design literature.
Prototyping is a process (the act, study, or skill) of
modeling an information-communication system
architecture in one or more levels of detail, using
descriptive models, abstract models, and working models
of the system and its component parts (synonym:
archetyping). This work was completed while the author
was working with prior employers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jenkins:1981:APC,
author = "C. Wesley Jenkins",
title = "Application prototyping: a case study",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "21--27",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807905",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Accurate specification of user requirements for
interactive systems is especially difficult in an
environment where the demand for information is
intense, short-fused and largely unpredictable. The
Congressional Budget Office was created in 1975 by an
Act of Congress. Its primary mandate is to serve the
Budget and Appropriation committees of both the Senate
and the House of Representatives. The Act also defined
a Congressional Budget process specifying a calendar of
events and specific completion dates for major
activities. This placing of budgetary actions produces
a highly charged environment in which CBO must be able
to respond immediately to information needs with
information that is both accurate and consistent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cox:1981:SRT,
author = "Patricia R. Cox",
title = "Specification of a regression test for a mini computer
operating system",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "29--32",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper I describe the practical problems of
designing a regression test set for an existing
mini-computer operating system. The ideal regression
test would test each function with all possible
combinations of the options for each variation of the
operating system. This is impractical if not impossible
so the alternative is to choose the individual cases
for maximum coverage. To do that the system is viewed
both functionally and structurally and cases are
selected for inclusion in the test set. The method of
selecting the tests is described along with the tools
that will be needed to measure the coverage and to
maintain the test set.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bryan:1981:ASC,
author = "William Bryan and Stanley Siegel and Gary
Whiteleather",
title = "An approach to software configuration control",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "33--47",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to discuss the process by
which a system's life cycle and its associated life
cycle products are managed to ensure the quality and
integrity of the system. We call this process
configuration control. Although many of the ideas in
this paper are applicable to systems in general, the
focus of this paper is on configuration control of
systems with software content. It is becoming apparent
to many, in both government and private industry, that
the high cost of maintenance of existing computer
systems may be attributed to poor configuration control
early in the system's life cycle. For example, in an
                 article entitled ``A Corporate Road Map for Systems
                 Development in the '80s,'' the following claim appears.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fredrick:1981:PIS,
author = "C. R. Fredrick",
title = "Project implementation of {Software Configuration
Management}",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "49--56",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Have you or one of your programmers said: ``The system
ran yesterday; I only changed one line.'' or ``I spent
my budget, but I'm not done.'' or ``I fixed that
problem yesterday, but it's back now.'' or ``I thought
it would be a nice feature for the operator, so I added
it to the program.'' or ``Why was this line of code
changed? Who did it and when?''? If these or other
similar statements are familiar, then Software
Configuration Management is a subject that should
interest you. Software Configuration Management (SCM)
is a management method that establishes a discipline
for the software development process and provides
visibility to that process. The step by step procedures
used by a large software organization to resolve some
of their development problems will be followed here.
The result of their efforts was the formulation of a
management method that significantly improved the
quality of their software products and reduced the
costs. It was learned later that other software
organizations had gone through similar processes and
arrived at similar results. This new tool is now known
as Software Configuration Management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berlack:1981:ISC,
author = "H. Ronald Berlack",
title = "Implementing software configuration control in the
structured programming environment",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "57--77",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The fundamental problems in the control of software
are explored. The elements of control as they relate to
communications is defined, and the implementation of
these elements in solving the fundamental problems and
achieving optimal control during a software development
life cycle, is explained. Control is defined as a
vehicle for communicating changes to established,
agreed-upon baseline points, made up of documents and
subsequent computer programs. By communicating change
to those involved or affected, and obtaining agreement
of the change, one achieves a degree of control that
does not inhibit software engineering innovation or
progress, but helps maintain the project's prime
objectives to deliver maintainable, error-free software
to the ultimate user.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gross:1981:PCV,
author = "Peter Gross",
title = "Producers and consumers views of software quality
(Panel Session)",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "79--79",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "As this very ACM workshop/symposium indicates,
                 software quality is of great concern to both producers
and users of software. It should be obvious to those
who have attended the earlier sessions today and to
those who will attend the sessions tomorrow that
quality is something that cannot be tested into a
system or added to a system. It must be integral from
the start of the definition of the system's
requirements through each phase of analysis, design,
implementation, integration, testing, and installation.
Software quality implies an engineering type approach
to the development of software. It implies the use of a
disciplined development environment, and the use of
tools and techniques to provide assurances throughout
the software development process that both the software
and its baseline specifications are complete,
consistent, and traceable from one to another.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Henry:1981:RAT,
author = "Sallie Henry and Dennis Kafura and Kathy Harris",
title = "On the relationships among three software metrics",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "81--88",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Automatable metrics of software quality appear to have
numerous advantages in the design, construction and
maintenance of software systems. While numerous such
metrics have been defined, and several of them have
been validated on actual systems, significant work
remains to be done to establish the relationships among
these metrics. This paper reports the results of
correlation studies made among three complexity metrics
which were applied to the same software system. The
three complexity metrics used were Halstead's effort,
McCabe's cyclomatic complexity and Henry and Kafura's
information flow complexity. The common software system
was the UNIX operating system. The primary result of
this study is that Halstead's and McCabe's metrics are
highly correlated while the information flow metric
appears to be an independent measure of complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Szulewski:1981:MSS,
author = "Paul A. Szulewski and Mark H. Whitworth and Philip
Buchan and J. Barton DeWolf",
title = "The measurement of software science parameters in
software designs",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "89--94",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Metrics of software quality have historically focused
on code quality despite the importance of early and
continuous quality evaluation in a software development
effort. While software science metrics have been used
to measure the psychological complexity of computer
programs as well as other quality related aspects of
algorithm construction, techniques to measure software
design quality have not been adequately addressed. In
this paper, software design quality is emphasized. A
general formalism for expressing software designs is
presented, and a technique for identifying and counting
software science parameters in design media is
proposed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Basili:1981:ECS,
author = "Victor R. Basili and Tsai-Yun Phillips",
title = "Evaluating and comparing software metrics in the
software engineering laboratory",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "95--106",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has appeared in the literature a great number of
metrics that attempt to measure the effort or
                 complexity in developing and understanding software.
There have also been several attempts to independently
validate these measures on data from different
                 organizations gathered by different people. These
metrics have many purposes. They can be used to
evaluate the software development process or the
software product. They can be used to estimate the cost
and quality of the product. They can also be used
during development and evolution of the software to
monitor the stability and quality of the product. Among
the most popular metrics have been the software science
metrics of Halstead, and the cyclomatic complexity
metric of McCabe. One question is whether these metrics
actually measure such things as effort and complexity.
One measure of effort may be the time required to
produce a product. One measure of complexity might be
the number of errors made during the development of a
product. A second question is how these metrics compare
with standard size measures, such as the number of
source lines or the number of executable statements,
i.e., do they do a better job of predicting the effort
or the number of errors? Lastly, how do these metrics
relate to each other?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ronback:1981:TMS,
author = "James Ronback",
title = "Test metrics for software quality",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "107--107",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses Bell Northern Research's
experience in utilizing an extended set of test metrics
for assuring the quality of software. The theory and
use of branch and path class coverage is discussed and
                 the reaction of users is described. This paper also
discusses the effect of using co-resident inspection
procedures in achieving cost-effective testing for a
high degree of test coverage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Benson:1981:AST,
author = "J. P. Benson",
title = "Adaptive search techniques applied to software
testing",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "109--116",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An experiment was performed in which executable
assertions were used in conjunction with search
techniques in order to test a computer program
automatically. The program chosen for the experiment
computes a position on an orbit from the description of
                 the orbit and the desired point. Errors were inserted
                 into the program randomly using an error generation
method based on published data defining common error
                 types. Assertions were written for the program and it was
tested using two different techniques. The first
divided up the range of the input variables and
selected test cases from within the sub-ranges. In this
way a ``grid'' of test values was constructed over the
program's input space. The second used a search
algorithm from optimization theory. This entailed using
the assertions to define an error function and then
maximizing its value. The program was then tested by
varying all of them. The results indicate that this
search testing technique was as effective as the grid
testing technique in locating errors and was more
efficient. In addition, the search testing technique
located critical input values which helped in writing
correct assertions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paige:1981:DST,
author = "Michael Paige",
title = "Data space testing",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "117--127",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A complete software testing process must concentrate
on examination of the software characteristics as they
may impact reliability. Software testing has largely
been concerned with structural tests, that is, test of
program logic flow. In this paper, a companion software
test technique for the program data called data space
testing is described. An approach to data space
analysis is introduced with an associated notation. The
concept is to identify the sensitivity of the software
to a change in a specific data item. The collective
information on the sensitivity of the program to all
data items is used as a basis for test selection and
generation of input values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goel:1981:OTP,
author = "Amrit L. Goel",
title = "Optimal testing policies for software systems",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807918",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An important problem of practical concern is to
determine how much testing should be done before a
system is considered ready for release. This decision,
of course, depends on the model for the software
failure phenomenon and the criterion used for
evaluating system readiness. In this paper, we first
develop a cost model based on the time dependent
failure rate function of Goel and Okumoto. Next, we
derive policies that yield the optimal values of the
level of test effort (b*) and software release time
(T*). The sensitivity of the optimal solution is also
numerically evaluated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Littlewood:1981:BDD,
author = "B. Littlewood",
title = "A {Bayesian} differential debugging model for software
reliability",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An assumption commonly made in early models of
software reliability is that the failure rate of a
program is a constant multiple of the number of bugs
remaining. This implies that all bugs have the same
effect upon the overall failure rate. The assumption is
challenged and an alternative proposed. The suggested
model results in earlier bug-fixes having a greater
                 effect than later ones (the worst bugs show themselves
                 earlier and so are fixed earlier), and the DFR property
between bug-fixes (confidence in programs increases
during periods of failure-free operation, as well as at
bug-fixes). The model shows a high degree of
                 mathematical tractability, and allows a range of
                 reliability measures
to be calculated exactly. Predictions of total
execution time to achieve a target reliability, are
obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musa:1981:SRMa,
author = "J. D. Musa and A. Iannino",
title = "Software reliability modeling accounting for program
size variation due to integration or design changes",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimation of software reliability quantities has
traditionally been on stable systems; i.e., systems
that are completely integrated and are not undergoing
design changes. Also, it is assumed that test results
are completely inspected for failures. This paper
describes a method for relaxing the foregoing
conditions by adjusting the lengths of the intervals
between failures experienced in tests as compensation.
The resulting set of failure intervals represents the
set that would have occurred for a stable system in its
final configuration with complete inspection. The
failure intervals are then processed as they would be
for a complete system. The approach is developed for
the execution time theory of software reliability, but
                 the concepts could be applied to many other models. The
                 estimation of quantities of interest to the software
                 manager is illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musa:1981:SRMb,
author = "John D. Musa",
title = "Software reliability measurement session",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "129--130",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many people think of reliability as a devoutly wished
for but seldom present attribute of a program. This
leads to the idea that one should make a program as
reliable as one possibly can. Unfortunately, in the
real world software reliability is usually achieved at
the expense of some other characteristic of the product
such as program size, run or response time,
maintainability, etc. or the process of producing the
product such as cost, resource requirements,
scheduling, etc. One wishes to make explicit trade-offs
among the software product and process rather than let
them happen by chance. Such trade-offs imply the need
for measurement. Because of mounting development and
operational costs, pressures for obtaining better ways
of measuring reliability, have been mounting. This
session deals with this crucial area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goel:1981:WST,
author = "Amrit L. Goel and Kazuhira Okumoto",
title = "When to stop testing and start using software?",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "131--138",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During the last decade, numerous studies have been
undertaken to quantify the failure process of large
scale software systems. (see for example, references
1-12.) An important objective of these studies is to
predict software performance and use the information
for decision making. An important decision of practical
concern is the determination of the amount of time that
should be spent in testing. This decision of course
will depend on the model used for describing the
failure phenomenon and the criterion used for
determining system readiness. In this paper we present
a cost model based on the time dependent fault
detection rate model of Goel and Okumoto (4,5) and
describe a policy that yields the optimal value of test
time T.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Littlewood:1981:SRG,
author = "B. Littlewood",
title = "Stochastic reliability growth: a model with
applications to computer software faults and hardware
design faults",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "139--152",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807922",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An assumption commonly made in early models of
software reliability is that the failure rate of a
program is a constant multiple of the number of faults
remaining. This implies that all faults have the same
effect upon the overall failure rate. The assumption is
challenged and an alternative proposed. The suggested
model results in earlier fault-fixes having a greater
effect than later ones (the worst faults show
themselves earlier and so are fixed earlier), and the
DFR property between fault-fixes (confidence in
programs increases during periods of failure-free
operations, as well as at fault-fixes). The model shows
a high degree of mathematical tractability, and allows
a range of reliability measures to be calculated
exactly. Predictions of total execution time to achieve
a target reliability, and total number of fault-fixes
to target reliability, are obtained. It is suggested
that the model might also find applications in those
hardware reliability growth situations where design
errors are being eliminated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Decreasing failure rate; Design debugging; Design
errors; Pareto Distribution; Probability distribution
mixture; Programming debugging modelling; Reliability
growth; Software errors; Software failure rate;
Software faults; Software mttf; Software reliability",
}
@Article{Ottenstein:1981:SDS,
author = "Linda M. Ottenstein",
title = "Software defects --- a software science perspective",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "153--155",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper gives a model for computing the programming
time. The results of tests with programs in APL, BASIC,
and FORTRAN are also given and discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ottenstein:1981:PNE,
author = "Linda Ottenstein",
title = "Predicting numbers of errors using software science",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "157--167",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An earlier paper presented a model based on software
science metrics to give quantitative estimate of the
number of bugs in a programming project at the time
validation of the project begins. In this paper, we
report the results from an attempt to expand the model
to estimate the total number of bugs to expect during
the total project development. This new hypothesis has
been tested using the data currently available in the
literature along with data from student projects. The
model fits the published data reasonably well, however,
the results obtained using the student data are not
conclusive.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schneider:1981:SEE,
author = "Victor Schneider",
title = "Some experimental estimators for developmental and
delivered errors in software development projects",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "169--172",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Experimental estimators are presented relating the
expected number of software problem reports (B) in a
software development project to the overall reported
                 professional effort (E) in ``man months'', the number of
                 subprograms (n), and the overall count of thousands of
                 coded source statements of software (S). [equation] These
estimators are shown to be consistent with data
obtained from the Air Force's Rome Air Development
Center, the Naval Research Laboratory, and Japan's
Fujitsu Corporation. Although the results are
promising, more data is needed to support the validity
of these estimators.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sneed:1981:SSA,
author = "H. Sneed",
title = "{SOFTDOC} --- {A} system for automated software static
analysis and documentation",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "173--177",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010627.807926",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The code itself is useless without adequate
documentation. Besides that it is almost impossible to
validate and verify code unless it is properly
documented. Yet most of the attention of the past has
been devoted to producing code and little to producing
the documentation although it is obvious that it is
necessary both for testing and maintaining the software
product. Software documentation can be classified
according to its usage. Thus, there is a functional
documentation for describing what a system does and
what it is used for, and technical documentation for
describing how the software is constructed and how it
performs its functions. The former is directed toward
the user, the latter toward the tester and maintainer.
The two are, however, highly interrelated. Since the
programmer seldom writes the user documentation it is
necessary for those who describe what the system does,
to know how it does it. An accurate technical
documentation is a prerequisite for producing accurate
user documentation. Finally it serves yet another
purpose. Without it, it is not possible to control the
quality of the software. Software Quality Control
presupposes a full and up to date technical description
in order to assess the characteristics of the system
such as modularity, portability, reliability, etc.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crowley:1981:ADP,
author = "John D. Crowley",
title = "The application development process: {What}'s wrong
with it?",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "179--187",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper will examine the process used in the
development of computer applications. The claim is made
that the current methodology has serious deficiencies,
but that a software development approach is becoming
available to help address these problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bailey:1981:SSU,
author = "C. T. Bailey and W. L. Dingee",
title = "A software study using {Halstead} metrics",
journal = j-SIGMETRICS,
volume = "10",
number = "1",
pages = "189--197",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800003.807928",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an application of Maurice
Halstead's software theory to a real time switching
system. The Halstead metrics and the software tool
developed for computing them are discussed. Analysis of
the metric data indicates that the level of the
switching language was not constant across algorithms
and that software error data was not a linear function
of volume.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Esposito:1981:WCT,
author = "A. Esposito and A. Mazzeo and P. Costa",
title = "Workload characterization for trend analysis",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "5--15",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041800",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The methodology of analysis proposed in this paper
aims at predicting the workload of a computer. This
methodology consists of applying an algorithm of
clustering to the workload, its jobs being identified
by a pair $ (X, P) $, where $X$ is the resource-vector
of the job and $P$ stands for the priority given to the
job by the user. The hereby obtained clusters are then
associated to the $ a_i$ activities developed in the
system and determine the influence of each $ a_i$ to
the overall workload. By repeating this operation at
different times, either the periodicity or the
monotonic changes that may occur in each activity are
determined. This makes it possible to predict the
evolution of the overall workload and consequently to
evaluate changes to be carried out in the system. The
above methodology is applied to a specific case and is
illustrated in its various phases. The results obtained
have validated the method. The study is still going on,
with continuous periodical observations in order to
update the data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musa:1981:SRMc,
author = "J. D. Musa and A. Iannino",
title = "Software reliability modeling: accounting for program
size variation due to integration or design changes",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "16--25",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041801",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimation of software reliability quantities has
traditionally been based on stable programs; i.e.,
programs that are completely integrated and are not
undergoing design changes. Also, it is ordinarily
assumed that all code is being executed at one time or
another and that test or operational results are being
completely inspected for failures. This paper describes
a method for relaxing the foregoing conditions by
adjusting the lengths of the intervals between failures
experienced as compensation. The resulting set of
failure intervals represents the set that would have
occurred for a completely inspected program that was at
all times in its final configuration. The failure
intervals are then processed as they would be for a
stable program. The approach is developed for the
execution time theory of software reliability, but the
concepts could be applied to many other models as well.
Many definitions are given to describe program size
variation and associated phenomena. Attention is
focused on the special case of sequential integration
and pure growth. The adjustment method is described and
its benefits in improving the estimation of quantities
of interest to the software manager are illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Comer:1981:CTD,
author = "J. R. Comer and J. R. Rinewalt and M. M. Tanik",
title = "A comparison of two different program complexity
measures",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "26--28",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041802",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, a number of program complexity
metrics have been developed to measure various
characteristics of computer programs [1, 3]. Included
among these metrics are Zolnowski's composite measure
of program complexity [4, 5] and McCabe's cyclomatic
measure of program complexity [2]. The present paper
examines these two metrics and attempts to measure
their correlation with a third metric assigned by the
program's author. This metric has been called the
psychological complexity or the intuitive complexity of
a program.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abrams:1981:NNM,
author = "Marshall D. Abrams and Dorothy C. Neiman",
title = "{NBS} network measurement methodology applied to
synchronous communications",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "29--36",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper focuses on the application of the NBS
Network Measurement Instrument (NMI) to synchronous
data communication. The suitability of the underlying
Stimulus --- Acknowledgement --- Response (SAR) model
to support the implementation of this methodology
permitting quantitative evaluation of interactive
teleprocessing service delivered to the user is
described. The logic necessary to interpret SAR
components and boundaries depends on character time
sequence for asynchronous data communications traffic
but entails protocol decomposition and content analysis
for character synchronous data traffic. The
decomposition and analysis rules necessary to evaluate
synchronous communications are discussed and the level
of protocol violation detection which results as a
byproduct is cited. Extensions to the utility of the
Network Measurement Instrument (NMI), deriving from
additional workload profiling measures desirable for
character synchronous communications, are also
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data communications; protocol validation; synchronous;
teleprocessing service evaluation",
}
@Article{Larsen:1981:CEL,
author = "R. L. Larsen and J. R. Agre and A. K. Agrawala",
title = "A comparative evaluation of local area communication
technology",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "37--47",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041804",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The builder of a local area network is immediately
confronted with the selection of a communications
architecture to interconnect the elements (hosts and
terminals) of the network. This choice must often be
made in the presence of great uncertainty regarding the
available alternatives and their capabilities, and a
dearth of comparative information. This was the
situation confronting NASA upon seriously considering
local area networks as an architecture for mission
support operations. As a result, a comparative study
was performed in which alternative communication
architectures were evaluated under similar operating
conditions and system configurations. Considered were:
(1) the ring, (2) the cable-bus, (3) a
circuit-switching system, and (4) a shared memory
system. The principal performance criterion used was
the mean time required to move a message from one host
processor to another host processor. Local operations
within each host, such as interrupt service time, were
considered to be part of this overall time. The
performance of each alternative was evaluated through
simulation models and is summarized in this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1981:HPT,
author = "Herman D. Hughes",
title = "A highly parameterized tool for studying performance
of computer systems",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "48--65",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A highly parameterized simulation model is described
which allows experiments to be performed for computer
performance evaluation studies. The results of these
experiments can be used to evaluate the effect of
changing the hardware configuration, the workload, the
scheduling policy, the multiprogramming level, etc. The
model is constructed to function either as a batch or
time-sharing system, or as a combination of both. This
simulation model also has the potential of providing
dynamic feedback for the scheduler. A discussion of the
design, implementation, and use of the model is
presented. Examples are provided to illustrate some
possible uses of the model and verifications of the
results obtained from the model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cumulative distribution function; events; hardware
configuration; model validation; queue; scheduling
policies; simulation model; system performance;
workloads",
}
@Article{Spiegel:1981:RPP,
author = "Mitchell G. Spiegel",
title = "{RTE}'s: past is prologue",
journal = j-SIGMETRICS,
volume = "10",
number = "2",
pages = "66--73",
month = "Summer",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041799.1041806",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:56:45 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper surveys the evolution of Remote Terminal
Emulators (RTEs). Major developments in RTE technology
are separated into three `generations' of products.
Each generation's unique applications and features are
highlighted. Recent developments are noted and a
prediction of future use for RTEs is provided.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Browne:1981:DSP,
author = "J. C. Browne",
title = "Designing systems for performance",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "1--1",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805467",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Real-time systems and systems to interface human work
environments will dominate the growth of computer
applications over the next decade. These systems must
execute their functions with the timeliness and
responsiveness required in these environments. The
design, development and testing of such systems must
guarantee performance as well as functionality and
reliability. There is not yet in place a technology to
support this requirement for engineering of
performance. The research and development community in
performance has focused primarily on analysis and
deduction rather than the performance arena. This talk
will define and discuss the tasks of engineering
performance into software systems and describe the
recent progress towards this goal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reiner:1981:MAP,
author = "David Reiner and Tad Pinkerton",
title = "A method for adaptive performance improvement of
operating systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "2--10",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805468",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a method for dynamic modification
of operating system control parameters to improve
system performance. Improved parameter settings are
learned by experimenting on the system. The experiments
compare the performance of alternative parameter
settings in each region of a partitioned
load-performance space associated with the system. The
results are used to modify important control parameters
periodically, responding to fluctuations in system load
and performance. The method can be used to implement
adaptive tuning, to choose between alternative
algorithms and policies, or to select the best fixed
settings for parameters which are not modified. The
method was validated and proved practical by an
investigation of two parameters governing core quantum
allocation on a Sperry Univac 1100 system. This
experiment yielded significant results, which are
presented and discussed. Directions for future research
include automating the method, determining the effect
of simultaneous modifications to unrelated control
parameters, and detecting dominant control
parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1981:VVT,
author = "Y. T. Wang",
title = "On the {VAX\slash VMS} time-critical process
scheduling",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "11--18",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805469",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The VAX/VMS process schedule is briefly described. A
simple priority-driven round-robin queuing model is
then constructed to analyze the behavior of the
time-critical processes of VAX/VMS under such a
schedule. Mean and variance of the conditional response
time of a process at a given priority are derived,
conditioned on the amount of service time required by
that process. Numerical results are given with
comparisons to the ordinary priority queuing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Soderlund:1981:ECP,
author = "Lars S{\"o}derlund",
title = "Evaluation of concurrent physical database
reorganization through simulation modeling",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "19--32",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805470",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a database system commonly
deteriorates due to degradation of the database's
physical data structure. The structure degradation is a
consequence of the normal operations of a general
database management system. When system performance has
degraded below acceptable limits the database must be
reorganized. In conventional, periodic reorganization
the database, or part of it, is taken off line while
the data structure is being reorganized. This paper
presents results from a study where it is shown that
concurrent reorganization, i.e. a continuous
reorganization of the physical data structure while
application processes have full access to the database,
is an attractive alternative to conventional
reorganization. The paper also presents a solution to a
methodological problem concerning the simulation of a
system which has activities with extremely varying
durations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazowska:1981:AMD,
author = "Edward D. Lazowska and John Zahorjan",
title = "Analytic modelling of disk {I/O} subsystems: a
tutorial",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "33--35",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805471",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a summary of a tutorial presented during the
conference discussing a number of approaches to
representing disk I/O subsystems in analytic models of
computer systems. As in any analytic modelling study,
the fundamental objective in considering an I/O
subsystem is to determine which devices should be
represented in the model, and what their loadings
should be. The device loadings represent the service
required by jobs, and are the basic parameters needed
by the computational algorithm which calculates
performance measures for the model. To set these
parameters, knowledge of service times at the various
devices in the I/O subsystem is required. The tutorial
begins by distinguishing analytic modelling from
alternative approaches, by identifying the parameter
values that are required for an analytic modelling
study, and by explaining the role of the computational
algorithm that is employed (Denning \& Buzen [1978]
provide a good, although lengthy, summary). We then
consider a sequence of models of increasingly complex
I/O subsystems. Next we discuss I/O subsystems with
rotational position sensing. We then discuss approaches
to modelling shared DASD, emphasizing hierarchical
techniques in which high-level models of each system can
be analyzed in isolation. We also mention recent
techniques for modelling complex I/O subsystems
involving multipathing. Finally, we discuss the
analysis of I/O subsystems based on broadcast channels
such as Ethernet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dowdy:1981:MUS,
author = "Lawrence W. Dowdy and Hans J. Breitenlohner",
title = "A model of {Univac 1100\slash 42} swapping",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "36--47",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805472",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a computer system depends upon the
efficiency of its swapping mechanisms. The swapping
efficiency is a complex function of many variables. The
degree of multiprogramming, the relative loading on the
swapping devices, and the speed of the swapping devices
are all interdependent variables that affect swapping
performance. In this paper, a model of swapping
behavior is given. The interdependencies between the
degree of multiprogramming, the swapping devices'
loadings, and the swapping devices' speeds are modeled
using an iterative scheme. The validation of a model is
its predictive capability. The given swapping model was
applied to a Univac 1100/42 system to predict the
effect of moving the swapping activity from drums to
discs. When the swapping activity was actually moved,
throughput increased by 20\%. The model accurately
predicted this improvement. Subtopics discussed
include: (1) the modeling of blocked and overlapped
disc seek activity, (2) the usefulness of empirical
formulae, and (3) the calibration of unmeasurable
parameters. Extensions and further applications of the
model are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Closed queuing networks; Model validation; Parameter
interdependencies; Performance prediction; Swapping",
}
@Article{Turner:1981:SFP,
author = "Rollins Turner and Henry Levy",
title = "Segmented {FIFO} page replacement",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "48--51",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805473",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A fixed-space page replacement algorithm is presented.
A variant of FIFO management using a secondary FIFO
buffer, this algorithm provides a family of performance
curves lying between FIFO and LRU. The implementation
is simple, requires no periodic scanning, and uses no
special hardware support. Simulations are used to
determine the performance of the algorithm for several
memory reference traces. Both the fault rates and
overhead cost are examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FIFO page replacement; LRU page replacement; Page
replacement algorithms; Performance evaluation",
}
@Article{Ferrari:1981:GMW,
author = "Domenico Ferrari",
title = "A generative model of working set dynamics",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "52--57",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805474",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An algorithm for generating a page reference string
which exhibits a given working set size behavior in the
time domain is presented, and the possible applications
of such a string are discussed. The correctness of the
algorithm is proved, and its computational complexity
found to be linear in the length of the string. A
program implementing the algorithm, which is performed
in one pass and requires very little space, is briefly
described, and some experimental results are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zahorjan:1981:BJB,
author = "J. Zahorjan and K. C. Sevcik and D. L. Eager and B. I.
Galler",
title = "Balanced job bound analysis of queueing networks",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "58--58",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805475",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Applications of queueing network models to computer
system performance prediction typically involve the
computation of their equilibrium solution. When
numerous alternative systems are to be examined and the
numbers of devices and customers are large, however,
the expense of computing the exact solutions may not be
warranted by the accuracy required. In such situations,
it is desirable to be able to obtain bounds on the
system solution with very little computation.
Asymptotic bound analysis (ABA) is one technique for
obtaining such bounds. In this paper, we introduce
another bounding technique, called balanced job bounds
(BJB), which is based on the analysis of systems in
which all devices are equally utilized. These bounds
are tighter than ABA bounds in many cases, but they are
based on more restrictive assumptions (namely, those
that lead to separable queueing network models).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neuse:1981:SHA,
author = "D. Neuse and K. Chandy",
title = "{SCAT}: a heuristic algorithm for queueing network
models of computing systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "59--79",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805476",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new algorithm for the
approximate analysis of closed product-form queueing
networks with fixed-rate, delay (infinite-server), and
load-dependent queues. This algorithm has the accuracy,
speed, small memory requirements, and simplicity
necessary for inclusion in a general network analysis
package. The algorithm allows networks with large
numbers of queues, job classes, and populations to be
analyzed interactively even on microcomputers with very
limited memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Analytic models; Approximations; Iterative algorithms;
Load-dependent queues; Performance analysis;
Product-form; Queueing networks",
}
@Article{Zahorjan:1981:SSQ,
author = "John Zahorjan and Eugene Wong",
title = "The solution of separable queueing network models
using mean value analysis",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "80--85",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805477",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Because it is more intuitively understandable than the
previously existing convolution algorithms, Mean Value
Analysis (MVA) has gained great popularity as an exact
solution technique for separable queueing networks.
However, the derivations of MVA presented to date apply
only to closed queueing network models. Additionally,
the problem of the storage requirement of MVA has not
been dealt with satisfactorily. In this paper we
address both these problems, presenting MVA solutions
for open and mixed load independent networks, and a
storage maintenance technique that we postulate is the
minimum possible of any ``reasonable'' MVA technique.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1981:ASQ,
author = "Alexander Thomasian and Behzad Nadji",
title = "Aggregation of stations in queueing network models of
multiprogrammed computers",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "86--104",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805478",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In queueing network models the complexity of the model
can be reduced by aggregating stations. This amounts to
obtaining the throughput of the flow-equivalent station
for the subnetwork of stations to be aggregated. When
the subnetwork has a separable solution, aggregation
can be carried out using the Chandy--Herzog--Woo
theorem. The throughput of the subnetwork can be
expressed explicitly in terms of its parameters when
the stations are balanced (have equal utilizations).
This expression for throughput can be used as an
approximation when the stations are relatively
unbalanced. The basic expression can be modified to
increase the accuracy of the approximation. A
generating function approach was used to obtain upper
bounds on the relative error due to the basic
approximation and its modifications. Provided that the
relative error bound is tolerable, a set of unbalanced
stations can be replaced by a single aggregate station
or a set of balanced stations. Finally, we propose a
methodology to simplify the queueing network model of a
large-scale multiprogrammed computer, which makes use
of the previous aggregation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schwetman:1981:CSM,
author = "Herb Schwetman",
title = "Computer system models: an introduction",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "105--105",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805479",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A system model is a tool used to predict system
performance under changing conditions. There are two
widely used modeling techniques: one based on discrete
event simulation and one based on queuing theory
models. Because queueing theory models are so much
cheaper to implement and use, as compared to simulation
models, there is growing interest in them. Users are
developing and using queuing theory models to project
system performance, project capacity, analyze
bottlenecks and configure systems. This talk uses an
operational analysis approach to develop system models.
This approach, as presented in Denning and Buzen [1],
provides an intuitive basis for analyzing system
performance and constructing system models. Very simple
calculations lead to estimates of bounds on performance
--- maximum job throughput rates and minimum message
response times. The emphasis is on gaining an
understanding of system models which reinforces
intuition, not on mathematical formulae. Several
examples are included. References to other works and
publications are provided. Application areas and
limitations of modeling techniques are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denning:1981:PEE,
author = "Peter J. Denning",
title = "Performance evaluation: {Experimental} computer
science at its best",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "106--109",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What is experimental computer science? This question
has been widely discussed ever since the Feldman Report
was published (1979 [18]). Many computer scientists
believe that survival of their discipline is intimately
linked to their ability to rejuvenate experimentation.
The National Science Foundation instituted the
Coordinated Experimental Research Program (CERP) in
1979 to help universities set up facilities capable of
supporting experimental research. Other agencies of
government are considering similar programs. Some
industrial firms are offering similar help through
modest cash grants and equipment discounts. What is
experimental computer science? Surprisingly, computer
scientists disagree on the answer. A few believe that
computer science is in flux --- making a transition
from theoretical to experimental science --- and,
hence, no operational definition is yet available. Some
believe that it is all the non-theoretical activities
of computer science, especially those conferring
``hands-on'' experience. Quite a few believe that it is
large system development projects --- i.e., computer
and software engineering --- and they cite MIT's
Multics, Berkeley's version of Bell Labs' UNIX, the
ARPAnet, IBM's database System R, and Xerox's
Ethernet-based personal computer network as examples.
These beliefs are wrong. There are well-established
standards for experimental science. The field of
performance evaluation meets these standards and
provides examples of experimental science for the rest
of the computing field.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rafii:1981:SAM,
author = "Abbas Rafii",
title = "Structure and application of a measurement tool ---
{SAMPLER\slash 3000}",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "110--120",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Design, internal structure, implementation experience
and a number of unique features of the SAMPLER/3000
performance evaluation tool are presented. This package
can be used to produce program CPU and wait time
profiles in several levels of detail in terms of code
segments, procedure names and procedure relative
addresses. It also provides an accurate profile of the
operating systems code which is exercised to service
requests from the selective parts of the user code.
Programs can be observed under natural load conditions
in a single user or shared environment. A program's CPU
usage is determined in terms of direct and indirect
cost components. The approaches to determine direct and
indirect CPU times are described. A program counter
sampling technique in virtual memory domain is
discussed. Certain interesting aspects of data analysis
and on-line data presentation techniques are described.
The features of the computer architecture, the services
of the loader and compilers which relate to the
operation of the tool are discussed. A case study is
finally presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tolopka:1981:ETM,
author = "Stephen Tolopka",
title = "An event trace monitor for the {VAX 11\slash 780}",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "121--128",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an event trace monitor
implemented on Version 1.6 of the VMS operating system
at Purdue University. Some necessary VMS terminology is
covered first. The operation of the data gathering
mechanism is then explained, and the events currently
being gathered are listed. A second program, which
reduces the data gathered by the monitor to usable
form, is next examined, and some examples depicting its
operation are given. The paper concludes with a brief
discussion of some of the monitor's uses.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Artis:1981:LFD,
author = "H. Pat Artis",
title = "A log file design for analyzing secondary storage
occupancy",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "129--135",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805483",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A description of the design and implementation of a
log file for analyzing the occupancy of secondary
storage on IBM computer systems is discussed. Typical
applications of the data contained in the log are also
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sanguinetti:1981:ESS,
author = "John Sanguinetti",
title = "The effects of solid state paging devices in a large
time-sharing system",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "136--153",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports the results of some measurements
taken on the effects two new solid state paging
devices, the STC 4305 and the Intel 3805, have on
paging performance in the Michigan Terminal System at
the University of Michigan. The measurements were taken
with a software monitor using various configurations of
the two solid state devices and the fixed head disk,
which they replace. Measurements were taken both during
regular production and using an artificial load created
to exercise the paging subsystem. The results confirmed
the expectation that the solid state paging devices
provide shorter page-in waiting times than the
fixed-head disk, and also pointed up some of the
effects which their differing architectures have on the
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1981:VMB,
author = "Richard T. Wang and J. C. Browne",
title = "Virtual machine-based simulation of distributed
computing and network computing",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "154--156",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes the use of virtual machine
architectures as a means of modeling and analyzing
networks and distributed computing systems. The
requirements for such modeling and analysis are
explored and defined along with an illustrative study
of an X.25 link-level protocol performance under normal
execution conditions. The virtualizable architecture
used in this work is the Data General Nova 3/D.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Huslende:1981:CEP,
author = "Ragnar Huslende",
title = "A combined evaluation of performance and reliability
for degradable systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "157--164",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the field of fault-tolerant computing is maturing
and results from this field are taken into practical
use the effects of a failure in a computer system need
not be catastrophic. With good fault-detection
mechanisms it is now possible to cover a very high
percentage of all the possible failures that can occur.
Once a fault is detected, systems are designed to
reconfigure and proceed either with full or degraded
performance depending on how much redundancy is built
into the system. It should be noted that one particular
failure may have different effects depending on the
circumstances and the time at which it occurs. Today we
see that large numbers of resources are being tied
together in complex computer systems, either locally or
in geographically distributed systems and networks. In
such systems it is obviously very undesirable that the
failure of one element can bring the entire system
down. On the other hand one can usually not afford to
design the system with sufficient redundancy to mask
the effect of all failures immediately.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jacobson:1981:MSD,
author = "Patricia A. Jacobson and Edward D. Lazowska",
title = "The method of surrogate delays: {Simultaneous}
resource possession in analytic models of computer
systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "165--174",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805487",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new approach to modelling the
simultaneous or overlapped possession of resources in
queueing networks. The key concept is that of iteration
between two models, each of which includes an explicit
representation of one of the simultaneously held
resources and a delay server (an infinite server, with
service time but no queueing) acting as a surrogate for
queueing delay due to congestion at the other
simultaneously held resource. Because of this, we refer
to our approximation technique as the ``method of
surrogate delays''.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jacobson:1981:AAM,
author = "Patricia Jacobson",
title = "Approximate analytic models of arbiters",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "175--180",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Results at very light and very heavy loads are easy to
obtain, but at intermediate loads performance modelling
is necessary. Because of the considerable cost of
simulation, we develop queueing network models which
can be solved quickly by approximate analytic
techniques. These models are validated by comparing
with simulations at certain points, and then used to
get a wide range of results quickly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Briggs:1981:PCB,
author = "Fay{\'e} A. Briggs and Michel Dubois",
title = "Performance of cache-based multiprocessors",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "181--190",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805489",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A possible design alternative to improve the
performance of a multiprocessor system is to insert a
private cache between each processor and the shared
memory. The caches act as high-speed buffers, reducing
the memory access time, and affect the delays caused by
memory conflicts. In this paper, we study the
performance of a multiprocessor system with caches. The
shared memory is pipelined and interleaved to improve
the block transfer rate, and assumes an L-M
organization, previously studied under random word
access. An approximate model is developed to estimate
the processor utilization and the speedup improvement
provided by the caches. These two parameters are
essential to a cost-effective design. An example of a
design is treated to illustrate the usefulness of this
investigation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bryant:1981:QNA,
author = "R. M. Bryant and J. R. Agre",
title = "A queueing network approach to the module allocation
problem in distributed systems",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "191--204",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800189.805490",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Given a collection of distributed programs and the
modules they use, the module allocation problem is to
determine an assignment of modules to processors that
minimizes the total execution cost of the programs.
Standard approaches to this problem are based on
solving either a network flow problem or a constrained
$0$-$1$ integer programming problem. In this paper we
discuss an alternative approach to the module
allocation problem where a closed, multiclass queueing
network is solved to determine the cost of a particular
module allocation. The advantage of this approach is
that the execution cost can be expressed in terms of
performance measures of the system such as response
time. An interchange heuristic is proposed as a method
of searching for a good module allocation using this
model and empirical evidence for the success of the
heuristic is given. The heuristic normally finds module
allocations with costs within 10 percent of the optimal
module allocation. Fast, approximate queueing network
solution techniques based on mean-value-analysis allow
each heuristic search to be completed in a few seconds
of CPU time. The computational complexity of each
search is $ O(M K (K + N) C)$ where $M$ is the number
of modules, $K$ is the number of sites in the network,
$N$ is the number of communications processors, and $C$
is the number of distributed program types. It appears
that substantial problems of this type could be solved
using the methods we describe.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Distributed computer systems; File assignment problem;
Mean-value analysis; Multiclass queueing network model;
Task allocation problem",
}
@Article{Marathe:1981:AME,
author = "Madhav Marathe and Sujit Kumar",
title = "Analytical models for an {Ethernet}-like local area
network link",
journal = j-SIGMETRICS,
volume = "10",
number = "3",
pages = "205--215",
month = "Fall",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1010629.805491",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:00 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ethernet-like local area network links have been
studied by a number of researchers. Most of these
studies have involved extensive simulation models
operating at the level of individual packets. However,
as we begin building models of systems built around
such links, detailed simulation models are neither
necessary, nor cost-effective. Instead, a simple
analytical model of the medium should be adequate as a
component of the higher level system models. This paper
discusses a number of analytical models and identifies
a last-in-first-out M/G/1 model with slightly increased
service time as one which adequately captures both the
mean and the coefficient of variation of the response
time. Given any offered load, this model can be used to
predict the mean waiting time and its coefficient of
variation. These two can be used to construct a
suitable 2 stage hyperexponential distribution. Random
numbers can then be drawn from this distribution for
use as waiting times of individual packets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pechura:1981:PLM,
author = "Michael A. Pechura",
title = "Page life measurements",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "10--12",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Clark:1981:UES,
author = "Jon D. Clark",
title = "An update on economies-of-scale in computing systems",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "13--14",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Janusz:1981:GMS,
author = "Edward R. Janusz",
title = "Getting the most out of a small computer",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "22--35",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cox:1981:DDD,
author = "Springer Cox",
title = "Data, definition, deduction: an empirical view of
operational analysis",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "36--44",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discussed the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Muramatsu:1981:SVQ,
author = "Hiroshi Muramatsu and Masahiro Date and Takanori
Maki",
title = "Structural validation in queueing network models of
computer systems",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "41--46",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sauer:1981:NSS,
author = "Charles H. Sauer",
title = "Numerical solution of some multiple chain queueing
networks",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "47--56",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041870",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports the results of simulation
experiment of a model of a virtual memory computer. The
model consists of three major subsystems: Program
Behavior, Memory Allocation and Secondary Storage. By
adapting existing models of these subsystems an overall
model for the computer operation is developed and its
performance is tested for various design alternatives.
The results are reported for different paging devices,
levels of multiprogramming, job mixes, memory
allocation scheme, page service scheduling and page
replacement rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nemeth:1981:AIP,
author = "Thomas A. Nemeth",
title = "An approach to interactive performance analysis in a
busy production system {(NOS/BE)}",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "57--73",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041808.1041815",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many different ideas have been promulgated on
performance evaluation by software and hardware
monitoring or modelling, but most of these have
associated implementation problems in practice. By
adopting a slightly different approach, (using an
approximation to `service wait time'), an analysis of
response is possible in a production system, with
negligible overhead. This analysis allows the actual
areas of contention to be identified, and some rather
unexpected results emerge, with a direct application to
scheduling policy. The work was done using the NOS/BE
operating system on a CDC Cyber 173 at the University
of Adelaide.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measurement; multiprogramming; performance evaluation;
production; response; scheduling; timesharing",
}
@Article{Knudson:1981:CPE,
author = "Michael E. Knudson",
title = "A computer performance evaluation operational
methodology",
journal = j-SIGMETRICS,
volume = "10",
number = "4",
pages = "74--80",
month = dec,
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041808.1041816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:57:58 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method suggesting how to organize and operate a
Computer Performance and Evaluation (CPE) project is
presented. It should be noted that the suggested
principles could apply to a modeling or simulation
effort.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Afshari:1982:MNT,
author = "P. V. Afshari and S. C. Bruell and R. Y. Kain",
title = "Modeling a new technique for accessing shared buses",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "4--13",
month = "Spring",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801685",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a queueing system in which customers (or
jobs) arrive to one of $Q$ separate queues to await
service from one of $S$ identical servers (Figure 1).
Once a job enters a queue it does not leave that queue
until it has been selected for service. Any server can
serve any job from any queue. A job selected for
service cannot be preempted. In this paper we consider
jobs to be in a single class; for the multiple class
result see [AFSH81a]. We assume once a queue has been
selected, job scheduling from that queue is fair. In
particular, our results hold for first come first serve
as well as random selection [SPIR79] and, for that
matter, any fair nonpreemptive scheduling policy within
a queue. We assume that arrivals to each queue follow a
Poisson process with the mean arrival rate to queue $q$
being $ \lambda q$. The $S$ identical exponential
servers are each processing work at a mean rate of $
\mu $. This system is general enough to be adaptable
for modeling many different applications. By choosing
the policy employed for queue selection by the servers,
we can model multiplexers, channels, remote job entry
stations, certain types of communication processors
embedded in communication networks, and sets of shared
buses. In this paper we will use the latter application
to discuss a realistic situation. The elements
(``jobs'') in the queues are messages to be sent from
modules connected to the shared bus of the system. The
servers are the buses; their service times are equal to
the message transmission times. The queues are in the
interface modules connected to and sharing the buses.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazar:1982:OCM,
author = "Aurel A. Lazar",
title = "Optimal control of a {M\slash M\slash m} queue",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "14--20",
month = "Spring",
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801686",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of optimal control of a M/M/m queueing
system is investigated. As in the M/M/l case the
optimum control is shown to be a window type mechanism.
The window size $L$ depends on the maximum allowable
time delay $T$ and can be explicitly computed. The
throughput time delay function of the M/M/m system is
briefly discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spirn:1981:NMB,
author = "Jeffrey R. Spirn",
title = "Network modeling with bursty traffic and finite buffer
space",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "21--28",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801687",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a class of queueing network
models, and a method for their approximate solution,
for computer networks with bursty traffic and finite
buffer space. The model is open, implying no population
limit except for buffer size limits and therefore no
window-type flow control mechanism. Each node of the
computer network is represented as a finite-length
queue with exponential service and an arrival process
which is initially bulk Poisson, but becomes less and
less clustered from hop to hop. Elaborations are
possible to account for varying mean packet sizes and
certain buffer pooling schemes, although these involve
further approximation. The approximations of the method
were validated against several simulations, with
reasonable agreement, and certainly with much less
error than is obtained by modeling a bursty traffic
source as Poisson.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:1981:ORN,
author = "Simon S. Lam and Y. Luke Lien",
title = "Optimal routing in networks with flow-controlled
virtual channels",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "38--46",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet switching networks with flow-controlled virtual
channels are naturally modeled as queueing networks
with closed chains. Available network design and
analysis techniques, however, are mostly based upon an
open-chain queueing network model. In this paper, we
first examine the traffic conditions under which an
open-chain model accurately predicts the mean
end-to-end delays of a closed-chain model having the
same chain throughputs. We next consider the problem of
optimally routing a small amount of incremental traffic
corresponding to the addition of a new virtual channel
(with a window size of one) to a network. We model the
new virtual channel as a closed chain. Existing flows
in the network are modeled as open chains. An optimal
routing algorithm is then presented. The algorithm
solves a constrained optimization problem that is a
compromise between problems of unconstrained
individual-optimization and unconstrained
network-optimization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Livny:1981:LBH,
author = "Miron Livny and Myron Melman",
title = "Load balancing in homogeneous broadcast distributed
systems",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "47--55",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Three different load balancing algorithms for
distributed systems that consist of a number of
identical processors and a CSMA communication system
are presented in this paper. Some of the properties of
a multi-resource system and the balancing process are
demonstrated by an analytic model. Simulation is used
as a means for studying the interdependency between the
parameters of the distributed system and the behaviour
of the balancing algorithm. The results of this study
shed light on the characteristics of the load balancing
process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wecker:1981:PGD,
author = "Stuart Wecker and Robert Gordon and James Gray and
James Herman and Raj Kanodia and Dan Seligman",
title = "Performance of globally distributed networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "58--58",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801690",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the design and implementation of computer networks
one must be concerned with their overall performance
and the efficiency of the communication mechanisms
chosen. Performance is a major issue in the
architecture, implementation, and installation of a
computer communication network. The architectural
design always involves many cost/performance tradeoffs.
Once implemented, one must verify the performance of
the network and locate bottlenecks in the structure.
Configuration and installation of a network involves
the selection of a topology and communication
components, channels and nodes of appropriate capacity,
satisfying performance requirements. This panel will
focus on performance issues involved in the efficient
design, implementation, and installation of globally
distributed computer communication networks.
Discussions will include cost/performance tradeoffs of
alternative network architecture structures, methods
used to measure and isolate implementation performance
problems, and configuration tools to select network
components of proper capacity. The panel members have
all been involved in one or more performance issues
related to the architecture, implementation, and/or
configuration of the major networks they represent.
They will describe their experiences relating to
performance issues in these areas. Methodologies and
examples will be chosen from these networks in current
use. There will be time at the end of the session for
questions to the panel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gordon:1981:OMH,
author = "R. L. Gordon",
title = "Operational measurements on a high performance ring",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "59--59",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801691",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Application and system software architecture can
greatly influence the operational statistics of a local
network. The implementation of a transparent file
system on top of a high bandwidth local network has
resulted in generating a high degree of file traffic
over the local network whose characteristics are
largely fixed and repeatable. These statistics will be
presented along with arguments for and against
designing mechanisms that optimize specifically for
that class of traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Local networks; Performance; Remote files",
}
@Article{Gray:1981:PSL,
author = "James P. Gray",
title = "Performance of {SNA}'s {LU-LU} session protocols",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "60--61",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SNA is both an architecture and a set of products
built in conformance with the architecture (1,2,3). The
architecture is layered and precisely defined; it is
both evolutionary and cost effective for implementing
products. Perhaps the largest component of cost
effectiveness is performance: transaction throughput
and response times. For SNA, this involves data link
control protocols (for SDLC and S/370 channel DLC's),
routing algorithms, protocols used on the sessions that
connect logical units (LU-LU session protocols), and
interactions among them. SNA's DLC and routing
protocols have been discussed elsewhere (4,5,6); this
talk examines protocols on sessions between logical
units (LU-LU session protocols) and illustrates the
results of design choices by comparing the performance
of various configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Herman:1981:APT,
author = "James G. Herman",
title = "{ARPANET} performance tuning techniques",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "62--62",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As part of its operation and maintenance of the
ARPANET for the past twelve years, BBN has been asked
to investigate a number of cases of degradation in
network performance. This presentation discusses the
practical methods and tools used to uncover and correct
the causes of these service problems. A basic iterative
method of hypothesis generation, experimental data
gathering, and analysis is described. Emphasis is
placed on the need for experienced network analysts to
direct the performance investigation and for the
availability of network programmers to provide special
purpose modifications to the network node software in
order to probe the causes of the traffic patterns under
observation. Many typical sources of performance
problems are described, a detailed list of the tools
used by the analyst are given, and a list of basic
techniques provided. Throughout the presentation
specific examples from actual ARPANET performance
studies are used to illustrate the points made.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aleh:1981:DUB,
author = "Avner Aleh and K. Dan Levin",
title = "The determination of upper bounds for economically
effective compression in packet switching networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "64--72",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801694",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper deals with the economic tradeoffs
associated with data compression in a packet switching
environment. In section II we present the data profile
concept and the compression analysis of typical
file-transfer data strings. This is followed by a
compression cost saving model that is developed in
section III. Upper bounds for an economically effective
compression service are derived there, and the paper
concludes with an example of these bounds based on
state of the art technology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McGregor:1981:CMP,
author = "Patrick V. McGregor",
title = "Concentrator modeling with pipelining arrivals
compensation",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "73--94",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801695",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A general model of Intelligent Communications
Concentrating Devices (ICCD) is presented and analyzed
for delay and overflow performance with compensation
for the pipelining effect of message arrivals extending
over time. The results of the analysis indicate that,
for the same trunk utilization, the trend towards
buffered terminals with longer messages requires
substantially greater buffering in the ICCD. The
nominal environment analyzed consisted of 10--40 medium
speed terminals (1200 b/s--9600 b/s) operating over a
medium speed trunk (9600 b/s) with trunk utilizations
from 20 percent to 80 percent and average message
lengths up to 1000 characters. This is a substantially
different environment than that typically served by
current implementations of ICCDs, which are frequently
reported to have throughput improvements of 2--3 times
the nominal originating terminal bandwidths, as opposed
to the typical factor of 5 for the analyzed
environment. This does not reflect on the
appropriateness of the ICCDs in serving the new
environment, but rather is simply stating that in the
new environment the same character volume of traffic
may be appearing with different traffic characteristics
over higher speed access lines. If the new environment
shows only a difference in traffic characteristics and
originating line speed, without change in the traffic
control scheme (or lack of scheme), the results
indicate essentially reproduction of a large part of
the terminal buffering in the ICCD for adequate
overflow performance. Alternatively, with smarter
terminals, traffic control schemes (flow control) may
enable the ICCD to be reduced to an essentially
unbuffered ``traffic cop,'' with the terminal buffering
also serving as the shared facility buffering. Several
practical implementations of ICCDs have provision for
flow control, but require cooperating terminals and
hosts. This suggests that ICCD design and application
will become more sensitive to the practical operating
features of the target environment than has been
generally the case to date. The analysis presented in
this paper involves many simplifications to the actual
problem. Additional work to accommodate non-exponential
message length distributions and heterogeneous terminal
configurations are perhaps two of the more immediate
problems that may be effectively dealt with.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mink:1981:MEC,
author = "Alan Mink and Charles B. {Silio, Jr.}",
title = "Modular expansion in a class of homogeneous networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "95--100",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a special class of homogeneous computer
network comprising several essentially identical but
independent computing systems (ICSs) sharing a single
resource. Of interest here are the effects of modularly
expanding the network by adding ICSs. We use a
previously presented approximate queueing network model
to analyze modular expansion in this class of network.
The performance measure used in this analysis is the
mean cycle time, which is the mean time between
successive requests for service by the same job at the
CPU of an ICS. In this analysis we derive an
intuitively satisfying mathematical relation between
the addition of ICSs and the incremental increase in
the service rate of the shared resource required to
maintain the existing level of system performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thareja:1981:UBA,
author = "Ashok K. Thareja and Satish K. Tripathi and Richard A.
Upton",
title = "On updating buffer allocation",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "101--110",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most of the analysis of buffer sharing schemes has
been aimed at obtaining the optimal operational
parameters under stationary load situations. It is well
known that in most operating environments the traffic
load changes. In this paper, we address the problem of
updating buffer allocation as the traffic load at a
network node changes. We investigate the behavior of a
complete partitioning buffer sharing scheme to gain
insight into the dependency of the throughput upon
system parameters. The summary of the analysis is
presented in the form of a heuristic. The heuristic is
shown to perform reasonably well under two different
types of stress tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elsanadidi:1981:ATW,
author = "M. Y. Elsanadidi and Wesley W. Chu",
title = "An analysis of a time window multiaccess protocol with
collision size feedback {(WCSF)}",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "112--118",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the performance of a window multiaccess
protocol with collision size feedback. We obtain bounds
on the throughput and the expected packet delay, and
assess the sensitivity of the performance to collision
recognition time and packet transmission time. An
approximate optimal window reduction factor to minimize
packet isolation time is {equation}, where $n$ is the
collision size and $R$ the collision recognition time
(in units of packet propagation delay). The WCSF
protocol, which requires more information than CSMA-CD,
is shown to have at least 30\% more capacity than
CSMA-CD for high bandwidth channels; that is, when
packet transmission time is comparable to propagation
delay. The capacity gain of the WCSF protocol decreases
as the propagation delay decreases and the collision
recognition time increases. Our study also reveals the
inherent stability of WCSF. When the input load
increases beyond saturation, the throughput remains at
its maximum value.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roehr:1981:PALa,
author = "Kuno M. Roehr and Horst Sadlowski",
title = "Performance analysis of local communication loops",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "119--129",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The communication loops analyzed here provide an
economic way of attaching many different terminals
which may be some kilometers away from a host
processor. Main potential bottlenecks were found to be
the loop transmission speed, the loop adapter
processing rate, and the buffering capability, all of
which are analyzed in detail. The buffer overrun
probabilities are found by convolving individual buffer
usage densities and by summing over the tail-end of the
obtained overall density function. Examples of analysis
results are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sherman:1981:DVH,
author = "R. H. Sherman and M. G. Gable and A. W. Chung",
title = "Distributed virtual hosts and networks: {Measurement}
and control",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "130--136",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801700",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Diverse network application requirements bring about
local networks of various size, degree of complexity
and architecture. The purpose of this paper is to
present a network protocol layer which is used to
provide a homogeneous operating environment and to
ensure the availability of network resources. The
network layer process probes the underlying local
network to discover its properties and then adapts to
changing network conditions. The principal contribution
of this paper is to generalize properties of diverse
local networks which can be measured. This is important
when considering maintenance and service of various
communication links. Three types of links are
point-to-point links, multi-drop, loop or switched
links and multi-access contention data buses. A
prototype network is used to show a complexity
improvement in the number of measurement probes
required using a multi-access contention bus. Examples
of measurement techniques and network adaptation are
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brice:1981:NPA,
author = "Richard Brice and William Alexander",
title = "A network performance analyst's workbench",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "138--146",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801701",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance measurement and analysis of the behavior
of a computer network usually requires the application
of multiple software and hardware tools. The location,
functionality, data requirements, and other properties
of the tools often reflect the distribution of
equipment in the network. We describe how we have
attempted to organize a collection of tools into a
single system that spans a broad subset of the
measurement and analysis activities that occur in a
complex network of heterogeneous computers. The tools
are implemented on a pair of dedicated midicomputers. A
database management system is used to couple the data
collection and analysis tools into a system highly
insulated from evolutionary changes in the composition
and topology of the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DuBois:1981:HMS,
author = "Donald F. DuBois",
title = "A {Hierarchical Modeling System} for computer
networks",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "147--155",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the Hierarchical Modeling System
(HMS). HMS is a tool --- a unified and expandable
system --- which supports the development of analytic
and simulator models of computer networks. The same
system and workload descriptions can be interpreted as
analytic queueing models with optimization techniques
or as discrete event simulation models. The rationale
behind the development of HMS is that high level
analyses incorporating analytic techniques may be used
in the early design phase for networks when many
options are considered while detailed simulation
studies of fewer design alternatives are appropriate
during the later stages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Analytic models; Computer networks; Hierarchical
models; Performance evaluation; Simulation",
}
@Article{Terplan:1981:NPR,
author = "K. Terplan",
title = "Network performance reporting",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "156--170",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Managing networks using Network Administration Centers
is increasingly considered. After introducing the
information demand for operational, tactical and
strategic network management the paper is dealing with
the investigation of the applicability of tools and
techniques for these areas. Network monitors and
software problem determination tools are investigated
in greater detail. Also implementation details for a
multihost-multinode network including software and
hardware tools combined by SAS are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Spiegel:1981:QLA,
author = "Mitchell G. Spiegel",
title = "Questions for {Local Area Network} panelists",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "172--172",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801704",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Much has been written and spoken about the
capabilities of emerging designs for Local Area
Networks (LAN's). The objective for this panel session
was to gather together companies and agencies that have
brought LAN's into operation. Questions about the
performance of LANs have piqued the curiosity of the
computer/communications community. Each member of the
panel briefly described his or her LAN installation and
workload as a means of introduction to the audience.
Questions about performance were arranged into a
sequence by performance attributes. Those attributes
thought to be of greatest importance were discussed
first. Discussion on the remainder of the attributes
continued as time and audience interaction permitted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roehr:1981:PALb,
author = "Kuno M. Roehr and Horst Sadlowski",
title = "Performance analysis of local communication loops",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "173--173",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801705",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The communication loops analyzed here provide an
economical way of attaching many different terminals to
an IBM 4331 host processor which may be several
kilometers away. As a first step of the investigation
protocol overhead is derived. It consists of request
and transmission headers and the associated
acknowledgements as defined by the System Network
Architecture. Additional overhead is due to the
physical layer protocols of the Synchronous Data Link
Control including lower level confirmation frames. The
next step is to describe the performance
characteristics of the loop attachment hardware,
primarily consisting of the external loop station
adapters for local and teleprocessing connections and
the loop adapter processor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sternick:1981:SAD,
author = "Barbara R. Sternick",
title = "Systems aids in determining {Local Area Network}
performance characteristics",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "174--174",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801706",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "At Bethesda, Maryland, the National Library of
Medicine has a large array of heterogeneous data
processing equipment dispersed over ten floors in the
Lister Hill Center and four floors in the Library
Building. The National Library of Medicine decided to
implement a more flexible, expansible access medium
(Local Area Network (LAN)) to handle the rapid growth
in the number of local and remote users and the
changing requirements. This is a dual coaxial cable
communications system designed using cable television
(CATV) technology. One cable, the outbound cable,
transfers information between the headend and the user
locations. The other cable, the inbound cable,
transfers information from the user locations to the
headend. This system will permit the distribution of
visual and digital information on a single medium.
On-line devices, computers, and a technical control
system network control center are attached to the LAN
through BUS Interface Units (BIUs). The technical
control system will collect statistical and status
information concerning the traffic, BIUs, and system
components. The BIUs will, at fixed intervals, transmit
status information to the technical control. The
Network Control Centers (NCC) will provide network
directory information for users of the system,
descriptions of the services available, etc. An X.25
gateway BIU will interface the LAN to the public
networks (Telenet and Tymnet) and to X.25 host computer
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anonymous:1981:AI,
author = "Anonymous",
title = "Authors Index",
journal = j-SIGMETRICS,
volume = "11",
number = "1",
pages = "175--175",
month = "Spring",
year = "1981",
CODEN = "????",
DOI = "https://doi.org/10.1145/800047.801707",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:58:02 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
%%% Performance evaluation of the CDC Cyber-176 by modeling its job scheduler.
@Article{Rajaraman:1982:PET,
author          = "M. K. Rajaraman",
title           = "Performance evaluation through job scheduler
modeling",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "9--15",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010673.800501",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The scheduler in the Cyber-176 computer does the major
functions of routing the job through the system,
controlling job's progress through aging and swapping
of jobs between various queues and resource allocation
among jobs. This paper reports some results of the
performance evaluation study of the Cyber-176 by
modeling the scheduler as the heart of the system. The
study explores the effects of varying the scheduler
parameters in the performance of the machine in a
particular installation. The basic theme of the paper
is that the selection of parameters in a laboratory or
a system test environment may not always result in the
best performance in an actual installation. The
simulation provides vital information for installation
management and tuning the operating system.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Identifies the parameters that most influence LAN performance, toward a
%%% parametric modeling tool and network specification language.
@Article{Mager:1982:TPA,
author          = "Peter S. Mager",
title           = "Toward a parametric approach for modeling local area
network performance",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "17--28",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010673.800502",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The task of modeling the performance of a single
computer (host) with associated peripheral devices is
now well understood [Computer 80]. In fact, highly
usable tools based on analytical modeling techniques
are commercially available and in widespread use
throughout the industry. [Buzen 78] [Buzen 81] [Won 81]
These tools provide a mechanism for describing
computerized environments and the workloads to be
placed on them in a highly parameterized manner. This
is important because it allows users to describe their
computer environments in a structured way that avoids
unnecessary complexity. It also is helpful in
facilitating intuitive interpretations of modeling
results and applying them to capacity planning
decisions. A first step toward building a modeling tool
and associated network specification language that
allows straightforward, inexpensive, and interpretable
modeling of multi-computer network performance is to
identify the set of characteristics (parameters) that
most heavily influence that performance. The result of
such a study for the communication aspects of local
area networks is the subject of this paper.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Summary of Score '82, the first workshop on software counting rules.
%%% Fix: the year contraction was entered with a TeX backtick (`82), which
%%% typesets as an *opening* quote; the apostrophe form '82 is correct and
%%% matches the ACM DL title. Corrected in both title and abstract.
@Article{Gaffney:1982:SSI,
author          = "John E. {Gaffney, Jr.}",
title           = "Score '82 --- a summary (at {IBM Systems Research
Institute}, 3\slash 23-3\slash 24\slash 82)",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "30--32",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010673.800503",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "``Score '82'', the first workshop on software counting
rules, was attended by practitioners who are working
with ``software metrics''. The concern was with
methodologies for counting such software measurables as
the number of ``operators'', ``operands'' or the number
of lines of code in a program. A ``metric'' can be a
directly countable ``measurable'' or a quantity
computable from one or several such ``measurables''.
``Metrics'' quantify attributes of the software
development process, the software itself, or some
aspect of the interaction of the software with the
processor that hosts it. In general, a ``metric''
should be useful in the development of software and in
measuring its quality. It should have some theory to
support its existence, and it should be based on actual
software data. This workshop was concerned principally
with the data aspects of ``metrics'', especially with
the rules underlying the collection of the data from
which they are computed.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Overview relating software metrics / Software Science to natural-language
%%% analysis ("Software Linguistics").
@Article{Misek-Falkoff:1982:NFS,
author          = "Linda D. Misek-Falkoff",
title           = "The new field of {``Software Linguistics''}: an
early-bird view",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "35--51",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800002.800504",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The phrase ``Software Linguistics'' is applied here to
a text-based perspective on software quality matters.
There is much in the new work on Software Metrics
generally, and Software Science in particular, that is
reminiscent of the activities of Natural Language
analysis. Maurice Halstead held that Software Science
could shed light on Linguistics; this paper sketches
some mutually informing reciprocities between the two
fields, and across related areas of textual, literary,
discourse, and communications analysis.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
keywords        = "Ease of use; Linguistics; Metrics; Natural language
analysis; Quality; Software science; Text complexity",
}
%%% On software counting rules and the absence of published methodology for
%%% the counting tools themselves.
@Article{Spiegel:1982:SCR,
author          = "Mitchell G. Spiegel",
title           = "Software counting rules: {Will} history repeat
itself?",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "52--56",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800002.800505",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "Counting rules in the software metrics field have been
developed for counting such software measurables as the
occurrence of operators, operands and the number of
lines of code. A variety of software metrics, such as
those developed by Halstead and others, are computed
from these numbers. Published material in the software
metrics field has concentrated on relationships between
various metrics, comparisons of values obtained for
different languages, etc. Yet, little, if anything has
been published on assumptions, experimental designs, or
the nature of the counting tools (or programs)
themselves used to obtain the basic measurements from
which these metrics are calculated.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Software Science experiment on C programs: effect of counting declaration
%%% statements on the metrics.
@Article{Kavi:1982:EDS,
author          = "Krishna M. Kavi and U. B. Jackson",
title           = "Effect of declarations on software metrics: an
experiment in software science",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "57--71",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800002.800506",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The attractiveness of software science [HAL77] is to
some extent due to the simplicity of its
instrumentation. Upon learning the detailed rules of
counting operators and operands, the experiments and
derivations using various algorithms and languages can
be repeated. Proposed or actual applications of
software science are quite varied (For example, see
[SEN79]). The size and construction time of a program
can be estimated from the problem specification and the
choice of programming language. An estimate of the
number of program bugs can be shown to depend on
programming effort. Optimal choice of module sizes for
multimodule implementations can be computed. Elements
of software science have applications to the analysis
of technical prose. The purpose of this experiment is
three fold. First, we want to apply software science
metrics to the language `C'. The second purpose of the
experiment is to study the effect of including
declaration statements while counting operators and
operands. Finally, we have set out to determine whether
the area of application has any influence on software
science metrics.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Describes the Machine Instruction Count (MIC) program for operator/operand
%%% counts of AN/UYK-7 and AN/UYS-1 object code.
@Article{Gaffney:1982:MIC,
author          = "John E. {Gaffney, Jr.}",
title           = "{Machine Instruction Count Program}",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "72--79",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800002.800507",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The Machine Instruction Count Program (MIC Program)
was originally developed in 1978 to produce `operator'
and `operand' counts of object programs written for the
AN/UYK-7 military computer. In 1981, its capability was
expanded so that it could apply to the AN/UYS-1 (or
``Advanced Signal Processor'') military computer. The
former machine, made by UNIVAC, hosts the IBM-developed
software for the sonar and defensive weapons
system/command system for the TRIDENT missile launching
submarine and the software for the sonar for the new
Los Angeles-class attack submarines. The second
machine, made by IBM, is incorporated into several
military systems including the LAMPS anti-submarine
warfare system. The MIC program has been applied to
collect a large amount of data about programs written
for the AN/UYK-7 and AN/UYS-1 computers. From these
data, various of the well-known software `metrics'(1)
such as `volume', `language level', and `difficulty'
have been calculated. Some of the results obtained have
been reported in the literature (3,4). Probably, the
most significant practical use of these data, so far,
has been the development of formulas for use in the
estimation of the amount of code to be written(2,5) as
a function of measures of the requirements that they
are to implement or the (top-level) design that they
are to implement.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Proposes a unification of Halstead's counting rules for programs and for
%%% English text, via open/closed linguistic classes.
@Article{Misek-Falkoff:1982:UHS,
author          = "Linda D. Misek-Falkoff",
title           = "A unification of {Halstead}'s {Software Science}
counting rules for programs and {English} text, and a
claim space approach to extensions",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "80--114",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800002.800508",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "In his Elements of Software Science, Maurice Halstead
proposed that software quality measurements could be
based on static lexemic analysis of the vocabularies of
operators and operands, and the number of occurrences
of each class, in computer programs. He also proposed
that quality issues in Natural Language text could be
addressed from similar perspectives, although his rules
for programs and for English seem to conflict. This
paper suggests that Halstead's seemingly disparate
rules for classifying the tokens of programs and the
tokens of English can be generally reconciled, although
Halstead himself does not claim such a union. The
thesis of Part One is a unification of his two
procedures, based on a linguistic partitioning between
``open'' and ``closed'' classes. This unification may
provide new inputs to some open issues concerning
coding, and suggest, on the basis of a conceptual
rationale, an explanation as to why programs which are
by Halstead's definition ``impure'' might indeed be
confusing to the human reader. Part Two of this paper,
by exploring the nodes in a textual ``Claim Space,''
briefly considers other groupings of the classes taken
as primitive by Halstead, in ways which bring to light
alternate and supplementary sets of candidate coding
rules productive for study of textual quality.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
keywords        = "Linguistics; Metrics; Natural language analysis;
Quality; Software science; Text complexity",
}
%%% On mechanical counting rules for potential operands in FORTRAN programs
%%% (COMMON and EQUIVALENCE complicate identification).
@Article{Estes:1982:DPO,
author          = "George E. Estes",
title           = "Distinguishing the potential operands in {FORTRAN}
programs",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "115--117",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800002.800509",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "There are several possible relationships between the
number of potential operands and the actual operands
used which correlate with available data (such as
Akiyama's debugging data). However, additional data is
required to distinguish between these hypotheses. Since
there is a large body of programs available written in
FORTRAN, we wish to develop a mechanical counting
procedure to enumerate potential operands in FORTRAN
programs. We are currently developing counting rules
for these potential operands. Sub-routine parameters
and input/output variables are relatively easy to
identify. However, a number of FORTRAN features, such
as COMMON blocks and EQUIVALENCE'd variables introduce
serious complications. Some additional analysis of
usage or heuristic approaches are required to
differentiate potential operands in these situations.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Effect of different GOTO counting rules on Software Science metrics,
%%% studied on 412 IMSL Fortran modules.
@Article{Conte:1982:EDC,
author          = "S. D. Conte and V. Y. Shen and K. Dickey",
title           = "On the effect of different counting rules for control
flow operators on {Software Science} metrics in
{Fortran}",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "2",
pages           = "118--126",
month           = "Summer",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010673.800510",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:58:56 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "Halstead in his Theory of Software Science, proposed
that in the Fortran language, each occurrence of a {\tt
GOTO i} for different label {\tt i}'s be counted as a
unique operator. Several writers have questioned the
wisdom of this method of counting GOTO's. In this
paper, we investigate the effect of counting GOTO's as
several occurrences of a single unique operator on
various software science metrics. Some 412 modules from
the International Mathematical and Statistical
Libraries (IMSL) are used as the data base for this
study.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Priority cutoff flow control scheme for integrated voice-data
%%% multiplexers, with a continuous-time queueing model.
@Article{Shanthikumar:1982:PCF,
author          = "J. G. Shanthikumar and P. K. Varshney and K. Sriram",
title           = "A priority cutoff flow control scheme for integrated
voice-data multiplexers",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "8--14",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807790",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "In this paper, we consider the flow control problem
for a movable boundary integrated voice-data
multiplexer. We propose a flow control scheme where a
decision rule based on the data queue length is
employed to cutoff the priority of voice to prevent a
data queue buildup. A continuous-time queueing model
for the integrated multiplexer is developed. The
performance of the flow control scheme is obtained
using an efficient computational procedure. A numerical
example is presented for illustration.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Empirical view of operational analysis: applicability of its
%%% relationships and the problem of measurement artifact.
@Article{Cox:1982:DDD,
author          = "Springer Cox",
title           = "Data, definition, deduction: an empirical view of
operational analysis",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "15--20",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807791",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The theoretical aspects of operational analysis have
been considered more extensively than matters of its
application in practical situations. Since its
relationships differ in their applicability, they must
be considered separately when they are applied. In
order to do this, the foundations of three such
relationships are examined from an empirical point of
view. To further demonstrate the intimate connection
between data, definitions, and performance models, the
problem of measurement artifact is considered.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Exact closed-form queue-length distribution of the M/Ck/1 queue
%%% (Poisson arrivals, Coxian service).
@Article{Perros:1982:QLD,
author          = "H. G. Perros",
title           = "The queue-length distribution of the {M\slash Ck\slash
1} queue",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "21--24",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807792",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The exact closed-form analytic expression of the
probability distribution of the number of units in a
single server queue with Poisson arrivals and Coxian
service time distribution is obtained.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Bernoulli-trial method for predicting how many communication lines a
%%% processor can support despite occasional character overrun.
@Article{Anderson:1982:BMP,
author          = "Gordon E. Anderson",
title           = "{Bernoulli} methods for predicting communication
processor performance",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "25--29",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800201.807793",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "This paper presents a method for applying Bernoulli
trials to predict the number of communication lines a
communication processor can process without losing data
due to character overrun conditions. First, a simple
method for determining the number of lines which a
communication processor can support without possibility
of character overrun will be illustrated. Then, it will
be shown that communication processors can tolerate
occasional character overrun. Finally, using Bernoulli
trials, the probability of character overrun and the
mean time between character overrun will be calculated.
These last two figures are useful to system designers
in determining the number of lines which a
communication processor can reasonably support.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
keywords        = "Bernoulli trials; Character overrun; Communication
processor; Markov process; Protocol; Thrashing",
}
%%% Automated Software Science measurement tool for APL programs, with
%%% experiments on Halstead's theory.
%%% Fix: transcription typo ``Becasue'' corrected to ``Because'' in the
%%% abstract; all other text unchanged.
@Article{Laurmaa:1982:AHT,
author          = "Timo Laurmaa and Markku Syrj{\"a}nen",
title           = "{APL} and {Halstead}'s theory: a measuring tool and
some experiments",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "32--47",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807794",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "We have designed and implemented an algorithm which
measures APL-programs in the sense of software science
by M. H. Halstead /1/. The reader is assumed to be
familiar with the theories of software science. Our
purpose has been to find the best possible algorithm to
automatically analyse large quantities of APL-programs.
We have also used our measuring tool to make some
experiments to find out if APL-programs and workspaces
obey the laws of software science or not. Because our
purpose was to analyse large quantities, i.e. hundreds
of programs we have not implemented an algorithm, which
gives exactly correct results from software science
point of view, because this would necessitate manual
clues to the analysing algorithm and thus an
interactive mode of analysis. Instead of it we have
strived for a tool, which carries out the analysis
automatically and as correctly as possible. In the next
section some difficulties encountered in the design of
the measuring algorithm and some inherent limitations
of it are discussed. Section 3 summarises the sources
of errors in the analysis carried out by our algorithm,
while section 4 gives a more detailed description of
the way analysis is carried out. The remaining sections
of this paper report on some experiments we have
carried out using our measuring tool. The purpose of
these experiments has been to evaluate the explaining
power of Halstead's theory in connection of
APL-programs. However, no attempt has been made to
process the results of the experiments statistically.
The results of the experiments have been treated here
only when `obvious' (in)compatibilities between the
theory and the results have been observed. Possible
reasons for the (in)compatibilities are also pointed
out.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Thesis work examining the foundations of Halstead's theory, focusing on
%%% the length estimator and its high variance.
@Article{Beser:1982:FES,
author          = "Nicholas Beser",
title           = "Foundations and experiments in software science",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "48--72",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/800201.807795",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "A number of papers have appeared on the subject of
software science; claiming the existence of laws
relating the size of a program and the number of
operands and operators used. The pre-eminent theory was
developed by Halstead in 1972. The thesis work focuses
on the examination of Halstead's theory; with an
emphasis on his fundamental assumptions. In particular,
the length estimator was analyzed to determine why it
yields such a high variance; the theoretical
foundations of software science have been extended to
improve the applicability of the critical length
estimator. This elaboration of the basic theory will
result in guidelines for the creation of counting rules
applicable to specific classes of programs, so that it
is possible to determine both when and how software
science can be applied in practice.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% P.A.P.A.: a program-complexity evaluation tool with syntax analyzers for
%%% RPG, PSEUDOCODE, and PL/SIII.
@Article{Schnurer:1982:PAP,
author          = "Karl Ernst Schnurer",
title           = "{Product Assurance Program Analyzer} ({P.A.P.A.}) a
tool for program complexity evaluation",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "73--74",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807796",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "This tool has been developed to assist in the software
validation process. P.A.P.A. will measure the
complexity of programs and detect several program
anomalies. The resulting list of analyzed programs is
sorted in order of descending complexity. Since high
complexity and error-proneness are strongly related,
the ``critical'' programs will be found earlier within
the development cycle. P.A.P.A. provides syntax
analyzers for RPG (II/III), PSEUDOCODE (design and
documentation language) and PL/SIII (without macro
language). It may be applied during the design-,
coding- and test phase of software development (e.g.
for design- and code inspections).",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Two-stage complexity-measurement tool (parser + analyzer) for real-time
%%% Electronic Switching System software; correlates metrics with failures.
@Article{Gross:1982:CME,
author          = "David R. Gross and Mary A. King and Michael R. Murr
and Michael R. Eddy",
title           = "Complexity measurement of {Electronic Switching System
(ESS)} software",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "75--85",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807797",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "We have been developing a tool that measures the
complexity of software: (1) to predict the quality of
software products and (2) to allocate proportionally
more testing resources to complex modules. The software
being measured is real-time and controls telephone
switching systems. This software system is large and
its development is distributed over a period of several
years, with each release providing enhancements and bug
fixes. We have developed a two-stage tool consisting of
a parser and an analyzer. The parser operates on the
source code and produces operator, operand, and
miscellaneous tables. These tables are then processed
by an analyzer program that calculates the complexity
measures. Changes for tuning our Halstead counting
rules involve simple changes to the analyzer only.
During the development there were problems and issues
to be confronted dealing with static analysis and code
metrics. These are also described in this paper. In
several systems we found that more than 80\% of
software failures can be traced to only 20\% of the
modules in the system. The McCabe complexity and some
of Halstead's metrics score higher than the count of
executable statements in their correlations with field
failures. It is reasonable to expect that we could
devote more effort to the review and test of
high-complexity modules and increase the quality of the
software product that we send to the field.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Evaluation of Halstead and McCabe metrics for identifying error-prone
%%% RPG II/III modules at IBM; counting rules presented at SCORE82.
@Article{Hartman:1982:CTR,
author          = "Sandra D. Hartman",
title           = "A counting tool for {RPG}",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "86--100",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807798",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The Halstead and McCabe metrics were evaluated for
their usefulness in identifying RPG II and RPG III
modules likely to contain a high number of errors. For
this evaluation, commercially available RPG modules
written within IBM were measured and assigned to low,
medium, or high metric value ranges. Conclusions from
this evaluation and RPG counting rules that were
concomitantly developed were presented at SCORE82 and
are summarized in the following report.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Predicting application error rate (as a code-quality measure) from
%%% environmental factors and internal factors (Halstead, McCabe, LOC).
@Article{Naib:1982:ASS,
author          = "Farid A. Naib",
title           = "An application of software science to the quantitative
measurement of code quality",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "3",
pages           = "101--128",
month           = "Fall",
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1010675.807799",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:17 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The error rate of a software application may function
as a measure of code quality. A methodology has been
developed which allows for the accurate prediction of
the error rate and hence code quality prior to an
application's release. Many factors were considered
which could conceivably be related to the error rate.
These factors were divided into two categories: those
factors which vary with time, and those factors which
do not vary with time. Factors which vary with time
were termed environmental factors and included such
items as: number of users, errors submitted to date,
etc. Factors which do not vary with time were termed
internal factors and included Halstead metrics, McCabe
metrics and lines of code.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
%%% Discrete optimal control applied to thrashing in virtual memory;
%%% introduces the ``thrashing level'' feedback parameter.
@Article{Blake:1982:OCT,
author          = "Russ Blake",
title           = "Optimal control of thrashing",
journal         = j-SIGMETRICS,
volume          = "11",
number          = "4",
pages           = "1--10",
month           = dec,
year            = "1982",
CODEN           = "????",
DOI             = "https://doi.org/10.1145/1035332.1035295",
ISSN            = "0163-5999 (print), 1557-9484 (electronic)",
bibdate         = "Thu Jun 26 10:59:37 MDT 2008",
bibsource       = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract        = "The method of discrete optimal control is applied to
control thrashing in a virtual memory. Certain
difficulties with several previous approaches are
discussed. The mechanism of optimal control is
presented as an effective, inexpensive alternative. A
simple, ideal policy is devised to illustrate the
method. A new feedback parameter, the thrashing level,
is found to be a positive and robust indicator of
thrashing. When applied to a real system, the idealized
policy effectively controlled the virtual memory.",
acknowledgement = ack-nhfb,
ajournal        = "Perform. Eval. Rev.",
fjournal        = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL     = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Babaoglu:1982:HRD,
author = "{\"O}zalp Babao{\u{g}}lu",
title = "Hierarchical replacement decisions in hierarchical
stores",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "11--19",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035296",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the primary motivations for implementing
virtual memory is its ability to automatically manage a
hierarchy of storage systems with different
characteristics. The composite system behaves as if it
were a single-level system having the more desirable
characteristics of each of its constituent levels. In
this paper we extend the virtual memory concept to
within each of the levels of the hierarchy. Each level
is thought of as containing two additional levels
within it. This hierarchy is not a physical one, but
rather an artificial one arising from the employment of
two different replacement algorithms. Given two
replacement algorithms, one of which has good
performance but high implementation cost and the other
poor performance but low implementation cost, we
propose and analyze schemes that result in an overall
algorithm having the performance characteristics of the
former and the cost characteristics of the latter. We
discuss the suitability of such schemes in the
management of storage hierarchies that lack page
reference bits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hagmann:1982:PPR,
author = "Robert B. Hagmann and Robert S. Fabry",
title = "Program page reference patterns",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "20--29",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035298",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a set of measurements of the
memory reference patterns of some programs. The
technique used to obtain these measurements is
unusually efficient. The data is presented in graphical
form to allow the reader to `see' how the program uses
memory. Constant use of a page and sequential access of
memory are easily observed. An attempt is made to
classify the programs based on their referencing
behavior. From this analysis it is hoped that the
reader will gain some insights as to the effectiveness
of various memory management policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bunt:1982:EMP,
author = "R. B. Bunt and R. S. Harbus and S. J. Plumb",
title = "The effective management of paging storage
hierarchies",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "30--38",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035299",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of storage hierarchies in the implementation
of a paging system is investigated. Alternative
approaches for managing a paging storage hierarchy are
described and two are selected for further study ---
staging and migration. Characteristic behaviour is
determined for each of these approaches and a series of
simulation experiments is conducted (using program
reference strings as data) for the purpose of comparing
them. The results clearly show migration to be a
superior approach from the point of view of both cost
and performance. Conclusions are drawn on the
effectiveness of each approach in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hodges:1982:WCP,
author = "Larry F. Hodges and William J. Stewart",
title = "Workload characterization and performance evaluation
in a research environment",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "39--50",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035301",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "This paper describes the process of benchmarking the
diverse research environment that constitutes the
workload of VAX/VMS at the University Analysis and
Control Center at North Carolina State University. The
benchmarking process began with a study of the system
load and performance characteristics over the six-month
period from January to June of 1981. Statistics were
compiled on the number of active users, CPU usage by
individual accounts, and peak load periods. Individual
users were interviewed to determine the nature and
major computing characteristics of the research they
were conducting on VAX. Information from all sources
was compiled to produce a benchmark that closely
paralleled actual system activity.\par
An analytic model was introduced and used in
conjunction with the benchmark data and hardware
characteristics to derive performance measures for the
system. Comparisons with measured system performance
were conducted to demonstrate the accuracy of the
model. The model was then employed to predict
performance as the system workload was increased, to
suggest improvements for the system, and to examine the
effects of those improvements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haring:1982:SDW,
author = "G{\"u}nter Haring",
title = "On state-dependent workload characterization by
software resources",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "51--57",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035302",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method for the characterization of computer workload
at the task level is presented. After having divided
the workload into different classes using a cluster
technique, each cluster is further analysed by state
dependent transition matrices. Thus it is possible to
derive the most probable task sequences in each
cluster. This information can be used to construct
synthetic scripts at the task level rather than the
usual description at the hardware resource level.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bolzoni:1982:PIS,
author = "M. L. Bolzoni and M. C. Calzarossa and P. Mapelli and
G. Serazzi",
title = "A package for the implementation of static workload
models",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "58--67",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035303",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The general principles for constructing workload
models are reviewed. The differences between static and
dynamic workload models are introduced and the
importance of the classification phase for the
implementation of both types of workload models is
pointed out. All the operations required for
constructing static workload models have been connected
in a package. Its main properties and fields of
application are presented. The results of an
experimental study performed with the package on a
batch and interactive workload show its ease of use and
the accuracy of the model obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McDaniel:1982:MSI,
author = "Gene McDaniel",
title = "The {Mesa Spy}: an interactive tool for performance
debugging",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "68--76",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035305",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Spy is a performance evaluation tool for the Mesa
environment that uses a new extension to the PC
sampling technique. The data collection process can use
information in the run time call stack to determine
what code is responsible for the resources being
consumed. The Spy avoids perturbing the user
environment when it executes, provides symbolic output
at the source-language level, and can be used without
recompiling the program to be examined. Depending upon
how much complication the user asks for during data
collection, the Spy steals between 0.3\% and 1.8\% of
the cycles of a fast machine, and between 1.08\% and
35.9\% of the cycles on a slow machine.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "high level language performance debugging; pc
sampling; performance analysis",
}
@Article{Hercksen:1982:MSE,
author = "Uwe Hercksen and Rainer Klar and Wolfgang
Klein{\"o}der and Franz Knei{\ss}l",
title = "Measuring simultaneous events in a multiprocessor
system",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "77--88",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035306",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the hierarchically organized multiprocessor system
EGPA, which has the structure of a pyramid, the
performance of concurrent programs is studied. These
studies are assisted by a hardware monitor
(Z{\"A}HLMONITOR III), which measures not only the
activity and idle states of CPU and channels, but
records the complete history of processes in the CPU
and interleaved I/O activities. The applied method is
distinguished from usual hardware measurements for two
reasons: it puts together the a priori independent
event-streams coming from the different processors to a
well ordered single event stream and it records not
only hardware but also software events. Most useful
have been traces of software events, which give the
programmer insight into the dynamic cooperation of
distributed subtasks of his program. This paper
describes the measurement method and its application to
the analysis of the behaviour of a highly asynchronous
parallel algorithm: the projection of contour lines
from a given point of view and the elimination of
hidden lines.\par
This work is sponsored by the Bundesminister f{\"u}r
Forschung und Technologie (German Federal Minister of
Research and Technology).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1982:SDF,
author = "Erol Gelenbe",
title = "Stationary deterministic flows in discrete systems:
{I}",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "89--101",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a deterministic system whose state space
is the $n$-dimensional first orthant. It may be
considered as a network of (deterministic) queues, a
Karp-Miller vector addition system, a Petri net, a
complex computer system, etc. Weak assumptions are then
made concerning the asymptotic or limiting behaviour of
the instants at which events are observed across a cut
in the system: these instants may be considered as
`arrival' or `departure' instants. Thus, like in
operational analysis, we deal with deterministic and
observable properties and we need no stochastic
assumptions or restrictions (such as independence,
identical distributions, etc.).\par
We consider however asymptotic or stationary
properties, as in conventional queueing analysis. Under
our assumptions a set of standard theorems are proved:
concerning arrival and departure instant measures,
concerning, `birth and death' type equations, and
concerning Little's formula. Our intention is to set
the framework for a new approach to performance
modelling of computer systems in a context close to
that used in actual measurements, but taking into
account infinite time behaviour in order to take
advantage of the useful mathematical properties of
asymptotic results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baccelli:1982:DBR,
author = "F. Baccelli and E. G. Coffman",
title = "A data base replication analysis using an {M\slash
M\slash m} queue with service interruptions",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "102--107",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A study of file replication policies for distributed
data bases will be approached through the analysis of
an M/M/m queue subjected to state-independent,
preemptive interruptions of service. The durations of
periods of interruption constitute a sequence of
independent, identically distributed random variables.
Independently, the times measured from the termination
of one period of interruption to the beginning of the
next form a sequence of independent, exponentially
distributed random variables. Preempted customers
resume service at the terminations of interrupt
periods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Plateau:1982:MPR,
author = "Brigitte Plateau and Andreas Staphylopatis",
title = "Modelling of the parallel resolution of a numerical
problem on a locally distributed computing system",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "108--117",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern VLSI technology has enabled the development of
high-speed computing systems, based upon various
multiprocessor architectures [1]. We can distinguish
several types of such systems, depending on the control
policies adopted, the interprocessor communication
modes and the degree of resource-sharing. The
efficiency of parallel processing may be significant in
various areas of computer applications; especially,
large numerical applications, such as the solution of
linear systems and differential equations, are marked
by the need of high computation speeds. So, the advance
of parallel processing systems goes together with
research effort in developing efficient parallel
algorithms [2]. The implementation of parallel
algorithms concerns the execution of concurrent
processes, assigned to the processors of the system,
which communicate with each other. The synchronization
needed at process interaction points implies the
existence of waiting delays, which constitute the main
limiting factor of parallel computation. Several
modelling techniques have been developed, that allow
the prediction and verification of parallel systems
performance. The two general approaches followed
concern deterministic models [3] and probabilistic
models. The latter, based on the theory of stochastic
processes [5] \ldots{} are well adapted to the analysis
of complex variable phenomena and provide important
measures concerning several aspects of parallel
processing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bard:1982:MSD,
author = "Yonathan Bard",
title = "Modeling {I/O} systems with dynamic path selection,
and general transmission networks",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "118--129",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035312",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines general transmission networks, of
which I/O subsystems are a special case. By using the
maximum entropy principle, we answer questions such as
what is the probability that a path to a given node is
free when that node is ready to transmit. Systems with
both dynamic and fixed path selection mechanisms are
treated. Approximate methods for large networks are
proposed, and numerical examples are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lazowska:1982:MCM,
author = "Edward D. Lazowska and John Zahorjan",
title = "Multiple class memory constrained queueing networks",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "130--140",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035313",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most computer systems have a memory constraint: a
limit on the number of requests that can actively
compete for processing resources, imposed by finite
memory resources. This characteristic violates the
conditions required for queueing network performance
models to be separable, i.e., amenable to efficient
analysis by standard algorithms. Useful algorithms for
analyzing models of memory constrained systems have
been devised only for models with a single customer
class.\par
In this paper we consider the multiple class case. We
introduce and evaluate an algorithm for analyzing
multiple class queueing networks in which the classes
have independent memory constraints. We extend this
algorithm to situations in which several classes share
a memory constraint. We sketch a generalization to
situations in which a subsystem within an overall
system model has a population constraint.\par
Our algorithm is compatible with the extremely time-
and space-efficient iterative approximate solution
techniques for separable queueing networks. This level
of efficiency is mandatory for modelling large
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solution technique; computer system
performance evaluation; memory constraint; population
constraint; queueing network model",
}
@Article{Brandwajn:1982:FAS,
author = "Alexandre Brandwajn",
title = "Fast approximate solution of multiprogramming models",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "141--149",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035332.1035314",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models of computer systems with
multiprogramming constraints generally do not possess a
product-form solution in the sense of Jackson.
Therefore, one is usually led to consider approximation
techniques when dealing with such models. Equivalence
and decomposition is one way of approaching their
solution. With multiple job classes, the equivalent
network may be viewed as a set of interdependent
queues. In general, the state-dependence in this
equivalent network precludes a product-form solution,
and the size of its state space grows rapidly with the
number of classes and of jobs per class. This paper
presents two methods for approximate solution of the
equivalent state-dependent queueing network. The first
approach is a manifold application of equivalence and
decomposition. The second approach, less accurate than
the first one, is a fast-converging iteration whose
computational complexity grows near-linearly with the
number of job classes and jobs in a class. Numerical
examples illustrate the accuracy of the two methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solutions; equivalence and decomposition;
multiprogramming; queueing network models; simultaneous
resource possession",
}
@Article{Agrawal:1982:ASM,
author = "Subhash C. Agrawal and Jeffrey P. Buzen",
title = "The aggregate server method for analyzing
serialization delays in computer systems",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "150--150",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035316",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The aggregate server method is an approximate,
iterative technique for analyzing the delays programs
encounter while waiting for entry into critical
sections, non-reentrant subroutines, and similar
software structures that cause processing to become
serialized. The method employs a conventional product
form queueing network comprised of servers that
represent actual I/O devices and processors, plus
additional aggregate servers that represent serialized
processing activity. The parameters of the product form
network are adjusted iteratively to account for
contention among serialized and non-serialized
customers at each physical device.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1982:PAS,
author = "Connie U. Smith and David D. Loendorf",
title = "Performance analysis of software for an {MIMD}
computer",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "151--162",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035317",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a technique for modeling and
analyzing the performance of software for an MIMD
(Multiple Instruction Multiple Data) computer. The
models can be used as an alternative to experimentation
for the evaluation of various algorithms and different
degrees of parallelism. They can also be used to study
the tradeoffs involved in increasing the amount of
parallel computation at the expense of increased
overhead for synchronization and communication. The
detection and alleviation of performance bottlenecks is
facilitated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agre:1982:MRN,
author = "Jon R. Agre and Satish K. Tripathi",
title = "Modeling reentrant and nonreentrant software",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "163--178",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A description of software module models for computer
systems is presented. The software module models are
based on a two level description, the software level
and the hardware level, of the computer system. In the
software module level it is possible to model
performance effects of software traits such as
reentrant and nonreentrant type software modules. The
resulting queueing network models are, in general, not
of the product form class and approximation schemes are
employed as solution techniques.\par
An example of a software module model of a hypothetical
computer system is presented. The model is solved with
a simulation program and three approximation schemes.
The approximation results were compared with the
simulation results and some schemes are found to
produce good estimates of the effects of changing from
reentrant to non-reentrant software modules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:1982:OME,
author = "L. T. Wu",
title = "Operational models for the evaluation of degradable
computing systems",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "179--185",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent advances in multiprocessor technology have
established the need for unified methods to evaluate
computing systems performance and reliability. In
response to this modeling need, this paper considers a
general modeling framework which permits the modeling,
analysis and evaluation of degradable computing
systems. Within this framework, a simple and useful
user-oriented performance variable is identified and
shown to be a proper generalization of the traditional
notions of system performance and reliability.\par
The modeling and evaluation methods considered in this
paper provide a relatively straightforward approach for
integrating reliability and availability measures with
performance measures. The hierarchical decomposition
approach permits the modeling and evaluation of a
computing system's subsystems (e.g., hardware,
software, peripherals, interfaces, user demand systems)
as a whole rather than the traditional methods of
evaluating these subsystems independently. Accordingly,
it becomes possible to evaluate the performance of the
system software and the reliability of the system
hardware simultaneously in order to measure the
effectiveness of the system design. Since the
performance variable introduced permits the
characterization of the system performance according to
the user's view of the systems, the results obtained
represent more accurate assessments of the system's
ability to perform than the existing performance or
reliability measures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marie:1982:ECA,
author = "Raymond A. Marie and Patricia M. Snyder and William J.
Stewart",
title = "Extensions and computational aspects of an iterative
method",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "186--194",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The so-called iterative methods are among a class of
methods that have recently been applied to obtain
approximate solutions to general queueing networks. In
this paper it is shown that if the network contains
feedback loops, then it is more advantageous to
incorporate these loops into the analysis of the
station itself rather than into the analysis of the
complement of the station. We show how this analysis
may be performed for a simple two-phase Coxian server.
Additionally, it is shown that the number of iterations
required to achieve a specified degree of accuracy may
be considerably reduced by using a continuous updating
procedure in which the computed throughputs are
incorporated as soon as they are available, rather than
at the end of an iteration. An efficient computational
scheme is presented to accompany this continuous
updating. Finally a number of examples are provided to
illustrate these features.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neuse:1982:HHA,
author = "Doug Neuse and K. Mani Chandy",
title = "{HAM}: the heuristic aggregation method for solving
general closed queueing network models of computer
systems",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "195--212",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An approximate analytical method for estimating
performance statistics of general closed queueing
network models of computing systems is presented. These
networks may include queues with priority scheduling
disciplines and non-exponential servers and several
classes of jobs. The method is based on the aggregation
theorem (Norton's theorem) of Chandy, Herzog and Woo.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aggregation theorem; analytical models;
approximations; computer system models; general closed
queueing networks; non-local-balance; non-product-form;
performance analysis; priority scheduling",
}
@Article{Eager:1982:PBH,
author = "D. L. Eager and K. C. Sevcik",
title = "Performance bound hierarchies for queueing networks",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "213--214",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In applications of queueing network models to computer
system performance prediction, the computational effort
required to obtain an exact equilibrium solution of a
model may not be justified by the accuracy actually
required. In these cases, there is a need for
approximation or bounding techniques that can provide
the necessary information at reduced cost. This paper
presents Performance Bound Hierarchies (PBHs) for
single class separable queueing networks consisting of
fixed rate and delay service centers. A PBH consists of
a hierarchy of upper (pessimistic) or lower
(optimistic) bounds on mean system residence time. (The
bounds can also be expressed as bounds on system
throughput or center utilizations.) Each successive
member requires more computational effort, and in the
limit, the bounds converge to the exact solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brumfield:1982:EAH,
author = "Jeffrey A. Brumfield and Peter J. Denning",
title = "Error analysis of homogeneous mean queue and response
time estimators",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "215--221",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035325",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Flow balance and homogeneity assumptions are needed to
derive operational counterparts of M/M/1 queue length
and response time formulas. This paper presents
relationships between the assumption errors and the
errors in the queue length and response time estimates.
A simpler set of assumption error measures is used to
derive bounds on the error in the response time
estimate. An empirical study compares actual errors
with their bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harbitter:1982:MTL,
author = "Alan Harbitter and Satish K. Tripathi",
title = "A model of transport level flow control",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "222--232",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A Markov Decision Process model is developed to
analyze buffer assignment at the transport level of the
ARPAnet protocol. The result of the analysis is a
method for obtaining an assignment policy which is
optimal with respect to a delay/throughput/overhead
reward function. The nature of the optimal policy is
investigated by varying parameters of the reward.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1982:CPC,
author = "Erol Gelenbe and Isi Mitrani",
title = "Control policies in {CSMA} local area networks:
{Ethernet} controls",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "233--240",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035328",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An analysis of the random carrier sense multiple
access channel is presented in terms of the behaviour
of each participating station. A detailed model of the
station protocol, including the control policy used in
case of collisions, is used to derive the traffic and
throughput of each station. The channel traffic
characteristics are derived from this model and used,
in turn, to derive the traffic parameters entering into
the station model. This provides a solution method for
complete system characteristics for a finite
prespecified set of stations. The approach is then used
to analyse control policies of the type used in
ETHERNET. We show, in particular, that as the
propagation delay becomes small, the specific form of
the control policy tends to have a marginal effect on
network performance. The approach also applies to the
DANUBE and XANTHOS networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tripathi:1982:ATF,
author = "Satish K. Tripathi and Alan Harbitter",
title = "An analysis of two flow control techniques",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "241--249",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035329",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queuing models can be useful tools in comparing the
performance characteristics of different flow control
techniques. In this paper the window control mechanism,
incorporated in protocols such as X.25 is compared to
the ARPAnet buffer reservation scheme. Multiclass
queuing models are used to examine message throughput
and delay characteristics. The analysis highlights the
interaction of long and short message (in terms of
length in packets) transmitters under the two flow
control techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{King:1982:MCR,
author = "P. J. B. King and I. Mitrani",
title = "Modelling the {Cambridge Ring}",
journal = j-SIGMETRICS,
volume = "11",
number = "4",
pages = "250--258",
month = dec,
year = "1982",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035293.1035330",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 10:59:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Models for the local area computer network known as
the Cambridge Ring are developed and evaluated. Two
different levels of protocol are considered: the
hardware and the Basic Block. These require different
approaches and, in the second case, an approximate
solution method. A limited comparison between the
Cambridge Ring and another ring architecture --- the
token ring --- is carried out.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marrevee:1982:PRT,
author = "J. Marrevee",
title = "The power of the read track and the need for a write
track command for disk back-up and restore utilities",
journal = j-SIGMETRICS,
volume = "12",
number = "1",
pages = "10--14",
month = "Winter",
year = "1982/1983",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Perros:1982:MPR,
author = "H. G. Perros",
title = "A model for predicting the response time of an on-line
system for electronic fund transfer",
journal = j-SIGMETRICS,
volume = "12",
number = "1",
pages = "15--21",
month = "Winter",
year = "1982/1983",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Augustin:1982:CCD,
author = "Reinhard Augustin and Klaus-J{\"u}rgen B{\"u}scher",
title = "Characteristics of the {COX}-distribution",
journal = j-SIGMETRICS,
volume = "12",
number = "1",
pages = "22--32",
month = "Winter",
year = "1982/1983",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perros:1984:QNB,
author = "H. G. Perros",
title = "Queueing networks with blocking: a bibliography",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "8--12",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, queueing networks with blocking have
been studied by researchers from various research
communities such as Computer Performance Modelling,
Operations Research, and Industrial Engineering. In
view of this, related results are scattered throughout
various journals. The bibliography given below is the
result of a first attempt to compile an exhaustive list
of related papers in which analytic investigations
(exact or approximate) or numerical investigations of
queueing networks with blocking have been reported.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DeMarco:1984:ASS,
author = "Tom DeMarco",
title = "An algorithm for sizing software products",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "13--22",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041825",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on efforts to develop a cost
forecasting scheme based on a Function Metric called
System BANG. A Function Metric is a quantifiable
indication of system size and complexity derived
directly from a formal statement of system requirement.
Conclusions from a small sample of projects are
presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fishwick:1984:PPG,
author = "Paul A. Fishwick and Stefan Feyock",
title = "{PROFGEN}: a procedure for generating machine
independent high-level language profilers",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "27--31",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041826",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many of the tools used in software metrics for
evaluating the execution characteristics of a program
are predicated on specific counting rules for operators
and operands [1, 2]. The analyst may use these counting
techniques to determine such program attributes as
estimation of object code size prior to actual
compilation and the relative efficiencies of various
language compilers. Operator/operand measures provide
useful results for certain analyses, but a deficiency
exists in that the data derived from this technique
does not directly reflect the program structure
afforded by a high-level language such as FORTRAN,
Pascal, or Ada. There are many instances where it is
desirable to measure the program at the source level
where the execution data may be directly associated
with specific high level program units such as source
statements and blocks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajaraman:1984:PML,
author = "M. K. Rajaraman",
title = "Performance measures for a local network",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "34--37",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041827",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parameters that influence the performance of a local
network consisting of three mainframes and an array
processor are identified. Performance measures are
developed for this network and their significance in
the operation and use of the network are discussed.
Some aspects of implementing such measures in a local
network are examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jones:1984:PEJ,
author = "Greg A. Jones",
title = "Performance evaluation of a job scheduler",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "38--43",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "International Business Machines' (IBM) Job Entry
Subsystem 3 (JES 3) is the integral part of the MVS
operating system that is responsible for controlling
all jobs from their entry into the system until their
exit out of the system. JES 3 maintains total awareness
of each job while it is in the system and services the
jobs upon request. These services include: preparing
the job for execution, selecting the job for execution,
and the processing of SYSIN/SYSOUT data. This paper
reports the findings of the performance evaluation
study of JES 3 through the use of a General Purpose
Simulation System (GPSS) model of JES 3 and exhibits
the benefits of using simulation models to study
complex systems such as JES 3. Once the model was
developed, it was used to evaluate the effects of
varying the job scheduler parameters of JES 3 in the
batch job environment. The input workload and service
times for the model were derived from System Management
Facilities (SMF) and Resource Management Facilities
(RMF) data from the modeled system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Clark:1984:NCP,
author = "Jon D. Clark and Thomas C. Richards",
title = "A note on the cost-performance ratios of {IBM}'s
{43XX} series",
journal = j-SIGMETRICS,
volume = "12",
number = "2",
pages = "44--45",
month = "Spring-Summer",
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041823.1041829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Pricing policies of computers with various performance
capabilities are usually assumed to be non-linear due
to economies-of-scale. This article analyzes the
cost-performance ratios of a single IBM product line,
the 43XX series and found this performance
characteristic to be surprisingly linear but with great
deal of individual variation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer; cost-performance; performance evaluation",
}
@Article{Coffman:1984:RPP,
author = "E. G. {Coffman, Jr.}",
title = "Recent progress in the performance evaluation of
fundamental allocation algorithms",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "2--6",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our understanding of several allocation algorithms
basic to operating systems and to data base systems has
improved substantially as a result of a number of
research efforts within the past one or two years. The
results have stirred considerable excitement in both
theorists and practitioners. This is not only because
of the inroads made into long-standing problems, but
also because of the surprising nature of the results;
in particular, we refer to proofs that certain
classical algorithms described as approximate are in
fact optimal in a strong probabilistic sense. The work
discussed here will be classified according to the
application areas, archival and dynamic storage
allocation. In both cases we are concerned with the
packing problems that arise in making efficient use of
storage. Equivalents of the archival problems also have
importance in scheduling applications [4]; however, we
shall focus exclusively on the storage allocation
setting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferrari:1984:FAW,
author = "Domenico Ferrari",
title = "On the foundations of artificial workload design",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "8--14",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The principles on which artificial workload model
design is currently based are reviewed. Design methods
are found wanting for three main reasons: their
resource orientation, with the selection of resources
often unrelated to the performance impact of resource
demands; their avoiding to define an accuracy criterion
for the resulting workload model; and their ignoring
the dynamics of the workload to be modeled. An attempt
at establishing conceptual foundations for the design
of interactive artificial workloads is described. The
problems found in current design methods are taken into
account, and sufficient conditions for the
applicability of these methods are determined. The
study also provides guidance for some of the decisions
to be made in workload model design using one of the
current methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perez-Davila:1984:PIF,
author = "Alfredo de J. Perez-Davila and Lawrence W. Dowdy",
title = "Parameter interdependencies of file placement models
in a {Unix} system",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "15--26",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A file assignment case study of a computer system
running Unix is presented. A queueing network model of
the system is constructed and validated. A modeling
technique for the movement of files between and within
disks is proposed. A detailed queueing network model is
constructed for several file distributions in secondary
storage. The interdependencies between the speed of the
CPU, the swapping activity, the visit ratios and the
multiprogramming level are examined and included in the
modeling technique. The models predict the performance
of several possible file assignments. The various file
assignments are implemented and comparisons between the
predicted and actual performance are made. The models
are shown to accurately predict user response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bunt:1984:MPL,
author = "Richard B. Bunt and Jennifer M. Murphy and Shikharesh
Majumdar",
title = "A measure of program locality and its application",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "28--40",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809311",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although the phenomenon of locality has long been
recognized as the single most important characteristic
of program behaviour, relatively little work has been
done in attempting to measure it. Recent work has led
to the development of an intrinsic measure of program
locality based on the Bradford--Zipf distribution.
Potential applications for such a measure are many, and
include the evaluation of program restructuring methods
(manual and automatic), the prediction of system
performance, the validation of program behaviour
models, and the enhanced understanding of the phenomena
that characterize program behaviour. A consideration of
each of these areas is given in connection with the
proposed measure, both to increase confidence in the
validity of the measure and to illustrate a methodology
for dealing with such problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krzesinski:1984:ILM,
author = "A. Krzesinski and J. Greyling",
title = "Improved lineariser methods for queueing networks with
queue dependent centres",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "41--51",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809312",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Lineariser is an MVA-based technique developed for
the approximate solution of large multiclass product
form queueing networks. The Lineariser is capable of
computing accurate solutions for networks of fixed rate
centres. However, problems arise when the Lineariser is
applied to networks containing centres with queue
dependent service rates. Thus networks exist which seem
well suited (a large number of lightly loaded centres,
large numbers of customers in each closed chain) for
Lineariser solution but whose queue dependent centres
cannot be solved accurately by the Lineariser method.
Examples have also been found where the Lineariser
computes accurate values for the queue lengths, waiting
times and throughputs though the values computed for
the queue length distributions are totally in error.
This paper presents an Improved Lineariser which
computes accurate approximate solutions for multiclass
networks containing an arbitrary number of queue
dependent centres. The Improved Lineariser is based on
MVA results and is therefore simple to implement and
numerically well behaved. The Improved Lineariser has
storage and computation requirements of order $ (M N) $
locations and $ (M N J^2) $ arithmetic operations where $M$ is
the number of centres, $N$ the total number of
customers and $J$ the number of closed chains. Results
from 130 randomly generated test networks are used to
compare the accuracy of the standard and Improved
Linearisers. The Improved Lineariser is consistently
more accurate (tolerance errors on all performance
measures less than 2 per cent) than the standard
Lineariser and its accuracy is insensitive to the size
of the network model. In addition, the Improved
Lineariser computes accurate solutions for networks
which cause the standard Lineariser to fail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Approximate solutions; Error analysis; Mean value
analysis; Multiclass queueing networks; Product form
solutions",
}
@Article{Zahorjan:1984:ILD,
author = "John Zahorjan and Edward D. Lazowska",
title = "Incorporating load dependent servers in approximate
mean value analysis",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "52--62",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/800264.809313",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network performance modelling technology has
made tremendous strides in recent years. Two of the
most important developments in facilitating the
modelling of large and complex systems are hierarchical
modelling, in which a single load dependent server is
used as a surrogate for a subsystem, and approximate
mean value analysis, in which reliable approximate
solutions of separable models are efficiently obtained.
Unfortunately, there has been no successful marriage of
these two developments; that is, existing algorithms
for approximate mean value analysis do not accommodate
load dependent servers reliably.\par
This paper presents a successful technique for
incorporating load dependent servers in approximate
mean value analysis. We consider multiple class models
in which the service rate of each load dependent server
is a function of the queue length at that server. In
other words, load dependent center $k$ delivers
``service units'' at a total rate of $ f_k(n_k)$ when $
n_k$ customers are present. We present extensive
experimental validation which indicates that our
algorithm contributes an average error in response
times of less than 1\% compared to the (much more
expensive) exact solution.\par
In addition to the practical value of our algorithm,
several of the techniques that it employs are of
independent interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agrawal:1984:RTP,
author = "Subhash C. Agrawal and Jeffrey P. Buzen and Annie W.
Shum",
title = "{Response Time Preservation}: a general technique for
developing approximate algorithms for queueing
networks",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "63--77",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809314",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Response Time Preservation (RTP) is introduced as a
general technique for developing approximate analysis
procedures for queueing networks. The underlying idea
is to replace a subsystem by an equivalent server whose
response time in isolation equals that of the entire
subsystem in isolation. The RTP based approximations,
which belong to the class of decomposition
approximations, can be viewed as a dual of the Norton's
Theorem approach for solving queueing networks since it
matches response times rather than throughputs. The
generality of the RTP technique is illustrated by
developing solution procedures for several important
queueing systems which violate product form
assumptions. Examples include FCFS servers with general
service times, FCFS servers with different service
times for multiple classes, priority scheduling, and
distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mussi:1984:EPE,
author = "Ph. Mussi and Ph. Nain",
title = "Evaluation of parallel execution of program tree
structures",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "78--87",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809315",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We define and evaluate two policies (NA-policy,
A-policy) for parallel execution of program tree
structures. Via a probabilistic model we analytically
determine, for each policy, the Laplace--Stieltjes
transform for the tree processing time distribution.
The acceleration of the program execution time achieved
when adding processors to a single processor
environment, is computed and plotted for each policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sanguinetti:1984:POP,
author = "John Sanguinetti",
title = "Program optimization for a pipelined machine: a case
study",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "88--95",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/800264.809316",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Amdahl 580 processor is a pipelined processor
whose performance can be affected by characteristics of
the instructions it executes. This paper describes
certain optimizations made to a set of system software
routines during their development. The optimization
effort was driven by the execution frequencies of
common paths through the programs in question, and by
the execution characteristics of those paths, as shown
by a processor simulator. Path optimization itself was
done with both general program optimization techniques
and with techniques specific to the particular
characteristics of the 580's pipeline. Overall, the
average execution time for these routines was reduced
by over 50\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Turner:1984:PDB,
author = "Rollins Turner and Jeffrey Schriesheim and Indrajit
Mitra",
title = "Performance of a {DECnet} based disk block server",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "96--104",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809317",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This report describes an experimental disk block
server implemented for the RSX-11M Operating System
using DECnet. The block server allows user programs on
one system to access files on a disk physically located
on a different system. The actual interface is at the
level of physical blocks and IO transfers. Results of
basic performance measurements are given, and explained
in terms of major components. Performance predictions
are made for servers of this type supporting more
complex workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stavenow:1984:TDC,
author = "Bengt Stavenow",
title = "Throughput-delay characteristics and stability
considerations of the access channel in a mobile
telephone system",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "105--112",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper a performance study of the access
channel in a cellular mobile telephone system /1/ is
presented. The method used in the Cellular System for
multiplexing the population of mobile terminals over
the access channel is a hybrid between the methods
known as CSMA/CD and BTMA. In the paper we extend an
analysis of CSMA/CD to accommodate the function of the
particular random multiaccess protocol. Results are
shown which illustrate the equilibrium channel
performance and the approximate
stability-throughput-delay tradeoff. Finally an
estimate of the average message delay is given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williams:1984:PQD,
author = "Elizabeth Williams",
title = "Processor queueing disciplines in distributed
systems",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "113--119",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A distributed program consists of processes, many of
which can execute concurrently on different processors
in a distributed system of processors. When several
processes from the same or different distributed
programs have been assigned to a processor in a
distributed system, the processor must select the next
process to run. The following two questions are
investigated: What is an appropriate method for
selecting the next process to run? Under what
conditions are substantial gains in performance
achieved by an appropriate method of selection?
Standard processor queueing disciplines, such as
first-come-first-serve and round-robin-fixed-quantum,
are studied. The results for four classes of queueing
disciplines tested on three problems are presented.
These problems were run on a testbed, consisting of a
compiler and simulator used to run distributed programs
on user-specified architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stephens:1984:CBH,
author = "Lindsey E. Stephens and Lawrence W. Dowdy",
title = "Convolutional bound hierarchies",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "120--133",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809320",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The time required to find the exact solution of a
product-form queueing network model of a computer
system can be high. Faster and cheaper methods of
solution, such as approximations, are natural
alternatives. However, the errors incurred when using
an approximation technique should be bounded. Several
recent techniques have been developed which provide
solution bounds. These bounding techniques have the
added benefit that the bounds can be made tighter if
extra computational effort is expended. Thus, a smooth
tradeoff of cost and accuracy is available. These
techniques are based upon mean value analysis. In this
paper a new bounding technique based upon the
convolution algorithm is presented. It provides a
continuous range of cost versus accuracy tradeoffs for
both upper and lower bounds. The bounds produced by the
technique converge to the exact solution as the
computational effort approaches that of convolution.
Also, the technique may be used to improve any existing
set of bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suri:1984:NBB,
author = "Rajan Suri and Gregory W. Diehl",
title = "A new `building block' for performance evaluation of
queueing networks with finite buffers",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "134--142",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new `building block', for analyzing
queueing networks. This is a model of a server with a
variable buffer-size. Such a model enables efficient
analysis of certain queueing networks with blocking due
to limited buffer spaces, since it uses only
product-form submodels. The technique is extensively
tested, and found to be reasonably accurate over a wide
range of parameters. Several examples are given,
illustrating practical situations for which our model
would prove to be a useful performance analysis tool,
specially since it is simple to understand, and easy to
implement using standard software for closed queueing
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Approximate analysis; Blocking; Performance modelling;
Performance prediction; Product form networks; Queueing
networks",
}
@Article{Lavenberg:1984:SAE,
author = "Stephen S. Lavenberg",
title = "A simple analysis of exclusive and shared lock
contention in a database system",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "143--148",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a probabilistic model of locking in a
database system in which an arriving transaction is
blocked and lost when its lock requests conflict with
the locks held by currently executing transactions.
Both exclusive and shared locks are considered. We
derive a simple asymptotic expression for the
probability of blocking which is exact to order $ 1 / N
$ where $N$ is the number of lockable items in the
database. This expression reduces to one recently
derived by Mitra and Weinberger for the special case
where all locks are exclusive.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Becker:1984:MMS,
author = "S. T. Becker and K. M. Rege and B. Sengupta",
title = "A modeling methodology for sizing a computer based
system in a netted environment",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "149--157",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a hybrid model, combining both
analytical and simulation techniques, which was
developed to study the performance of a netted computer
based system. The computer based system that was
modeled is the Facility Assignment and Control System
(FACS). This system is presently being deployed within
several Bell Operating Companies to inventory and
assign central office and outside plant facilities. A
key feature of the model is its ability to characterize
the dynamic nature of FACS. An understanding of this
dynamic nature is necessary in establishing important
operational guidelines such as allowable CPU
utilization, levels of multiprogramming and priority of
transaction processing. In addition, the model allows
the user to investigate the sensitivity of the system
to a wide range of conditions. Typical study items
could include the effect of various load scenarios,
ability of the system to meet performance objectives,
and different hardware configurations. As part of this
paper, both the practical aspects of modeling a netted
computer based system and the theoretical development
of the hybrid model are considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peachey:1984:EIS,
author = "Darwyn R. Peachey and Richard B. Bunt and Carey L.
Williamson and Tim B. Brecht",
title = "An experimental investigation of scheduling strategies
for {UNIX}",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "158--166",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The scheduler used in an operating system is an
important factor in the performance of the system under
heavy load. This paper describes the scheduling
philosophy employed in the UNIX operating system and
outlines the standard scheduling strategies. Modified
strategies which address deficiencies in the standard
strategies are described. The effectiveness of these
modified strategies is assessed by means of performance
experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:1984:PEI,
author = "Daniel A. Menasc{\'e} and Leonardo Lellis P. Leite",
title = "Performance evaluation of isolated and interconnected
token bus local area networks",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "167--175",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809325",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The token bus based local area network, REDPUC,
designed and implemented at the Pont{\'\i}ficia
Universidade Cat{\'o}lica do Rio de Janeiro is briefly
described. Analytic models are presented, which allow
one to obtain an approximation for the average packet
delay, as well as exact upper and lower bounds for the
same performance measure. A performance evaluation of
interconnected local networks is also given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agrawal:1984:UAS,
author = "Subhash C. Agrawal and Jeffrey P. Buzen and Ashok K.
Thareja",
title = "A Unified Approach to Scan Time Analysis of Token
Rings and Polling Networks",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "176--185",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809326",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Token rings and multipoint polled lines are two widely
used network interconnection techniques. The general
concept of cyclic allocation processes is defined and
used to characterize token passing and polling in these
networks. Scan time, the time to poll all nodes at
least once, is an important quantity in the response
time analysis of such networks. We derive expressions
for the mean and variance of scan times using a direct,
operational approach. Resulting expressions are general
and are applicable to both exhaustive and
non-exhaustive service. The effect of higher level
protocols is easily incorporated in the analysis via
calculations of constituent quantities. The expression
for mean scan time is exact and depends only on the
means of message transmission times and arrival rates.
The approximate analysis of variance takes into account
the correlation between message transmissions at
different nodes. Expected level of accuracy is
indicated by an example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandwajn:1984:EAM,
author = "Alexandre Brandwajn and William M. McCormack",
title = "Efficient approximation for models of multiprogramming
with shared domains",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "186--194",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models of multiprogramming systems
with memory constraints and multiple classes of jobs
are important in representing large commercial computer
systems. Typically, an exact analytical solution of
such models is unavailable, and, given the size of
their state space, the solution of models of this type
is approached through simulation and/or approximation
techniques. Recently, a computationally efficient
iterative technique has been proposed by Brandwajn,
Lazowska and Zahorjan for models of systems in which
each job is subject to a separate memory constraint,
i.e., has its own memory domain. In some important
applications, it is not unusual, however, to have
several jobs of different classes share a single memory
``domain'' (e.g., IBM's Information Management System).
We present a simple approximate solution to the shared
domain problem. The approach is inspired by the
recently proposed technique which is complemented by a
few approximations to preserve the conceptual
simplicity and computational efficiency of this
technique. The accuracy of the results is generally in
fair agreement with simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bondi:1984:RTP,
author = "Andr{\'e} B. Bondi and Jeffrey P. Buzen",
title = "The response times of priority classes under
preemptive resume in {M/G/m} queues",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "195--201",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/800264.809328",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Approximations are given for the mean response times
of each priority level in a multiple-class multiserver
M/G/m queue operating under preemptive resume
scheduling. The results have been tested against
simulations of systems with two and three priority
classes and different numbers of servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1984:AQN,
author = "Alexander Thomasian and Paul Bay",
title = "Analysis of {Queueing Network Models} with population
size constraints and delayed blocked customers",
journal = j-SIGMETRICS,
volume = "12",
number = "3",
pages = "202--216",
month = aug,
year = "1984",
CODEN = "????",
DOI = "https://doi.org/10.1145/1031382.809329",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:00:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing Network Models --- QNM's with population size
constraints and delayed blocked customers occur due to
MultiProgramming Level --- MPL constraints in computer
systems and window flow-control mechanisms in Computer
Communication Networks --- CCN's. The computational
cost of existing algorithms is unacceptable for large
numbers of chains and high population sizes. A fast
approximate solution technique based on load
concealment is presented to solve such QNM's. The
solution procedure is non-iterative in the case of
fixed rate Poisson arrivals, while iteration is
required in the case of quasi-random arrivals. Each
iteration requires the solution of a single chain
network of queues comprised of stations visited by each
chain. We then present an algorithm to detect saturated
chains and determine their maximum throughput. A fast
solution algorithm due to Reiser for closed chains is
also extended to the case of quasi-random arrivals. The
accuracy of the proposed solution techniques is
compared to previous techniques by applying it to a
test case, reported in the literature, and a set of
randomly generated examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaffney:1984:IEP,
author = "John E. Gaffney",
title = "Instruction entropy, a possible measure of
program\slash architecture compatibility",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "13--18",
month = dec,
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Sauer:1984:NSS,
author = "Charles H. Sauer",
title = "Numerical solution of some multiple chain queueing
networks",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "19--28",
month = dec,
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1984:SCS,
author = "Alexander Thomasian and Kameshwar Gargeya",
title = "Speeding up computer system simulations using
hierarchical modeling",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "34--39",
month = dec,
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elshoff:1984:PMP,
author = "James L. Elshoff",
title = "The {PEEK} measurement program",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "40--53",
month = "Winter",
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discussed the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1984:STM,
author = "Anna H{\'a}c",
title = "A survey of techniques for the modeling of
serialization delays in computer systems",
journal = j-SIGMETRICS,
volume = "12",
number = "4",
pages = "54--56",
month = dec,
year = "1984/1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mosleh:1985:BPR,
author = "Ali Mosleh and E. Richard Hilton and Peter S. Browne",
title = "{Bayesian} probabilistic risk analysis",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "5--12",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041839",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As modern business and financial institutions have
come to rely more and more on large scale computers for
management support, the magnitude of the risks and
their potential consequences has increased
correspondingly. In addition, large systems involving
multiprocessing, resource sharing, and distributed
processing have given rise to a new generation of risks
due to the increased vulnerabilities of such large
scale systems and the potential for fraudulent or
malicious misuse of their resources. Somehow, these
risks must be managed since either deliberate or
accidental impairment of these large scale systems can
have serious consequences for the business. That is,
threats must be identified, and the likelihood of their
occurrences and the elements of the system vulnerable
to each of these threats must be established. Any
program for risk management must begin with a risk
analysis to compare the vulnerabilities in order to
pinpoint and rank the system's weaknesses and to
provide a guide for the cost-effective, systematic
reduction of the probability of the system's being
subverted or otherwise impaired.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gong:1985:CMB,
author = "Huisheng Gong and Monika Schmidt",
title = "A complexity measure based on selection and nesting",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "14--19",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041840",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many concepts concerning the quantification of program
complexity have been developed during the last few
years. One of the most accepted and easy-to-apply
complexity measures, McCabe's cyclomatic number, has
been discussed and improved in several studies. The
cyclomatic number only considers the decision structure
of a program. Therefore, this paper proposes a new
method for calculating program complexity, the concept
of postdomination. This takes into account the degree
of nesting of a program. Combining this method and the
cyclomatic number, a new complexity measure will be
defined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cyclomatic number; degree of nesting; forward
dominance; program complexity",
}
@Article{Knudson:1985:PMS,
author = "Michael E. Knudson",
title = "A performance measurement and system evaluation
project plan proposal",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "20--31",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041841",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This document is an outline for a performance
measurement and evaluation effort. Performance
measurements consist of producing data showing
frequency and execution times for components of
computer systems. Components implies: (1) hardware, (2)
ucode, (3) macrocode, (4) applications software,
(5) systems (e.g., utilities in an operating-system
environment). Evaluation can be broken down into
several areas. Principle areas of interest are
comparative performance evaluation and an analysis of a
system's structure/behavior. Comparative evaluation
consists of: relative performance measurements of
different machines; a summary of collected data; and an
analysis of a system's structure, including the
production of data describing the interrelationship of
system components. This data may be narrative, but the
preferred technique is a graphical presentation showing
component relationships.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ejiogu:1985:SMS,
author = "Lem O. Ejiogu",
title = "A simple measure of software complexity",
journal = j-SIGMETRICS,
volume = "13",
number = "1",
pages = "33--47",
month = jun,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041838.1041842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Every science must have its own method of
investigation built on a sound foundation that is
empirical, justifiable and verifiable. Software
metrics, too, can benefit from this principle. A
complex aggregate of tools, ideas, methodologies,
programming languages, and varieties of applications go
into the development, design, manufacture and
maintenance of software. The combinations impose
another level of complexity on software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eager:1985:CRI,
author = "Derek L. Eager and Edward D. Lazowska and John
Zahorjan",
title = "A comparison of receiver-initiated and
sender-initiated adaptive load sharing (extended
abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "1--3",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317802",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One goal of locally distributed systems is to
facilitate resource sharing. Most current locally
distributed systems, however, share primarily data,
data storage devices, and output devices; there is
little sharing of computational resources. Load sharing
is the process of sharing computational resources by
transparently distributing the system workload. System
performance can be improved by transferring work from
nodes that are heavily loaded to nodes that are lightly
loaded. Load sharing policies may be either static or
adaptive. Static policies use only information about
the average behavior of the system; transfer decisions
are independent of the actual current system state.
Static policies may be either deterministic (e.g.,
``transfer all compilations originating at node $A$ to
server $B$'') or probabilistic (e.g., ``transfer half
of the compilations originating at node $A$ to server
$B$, and process the other half locally''). Numerous
static load sharing policies have been proposed. Early
studies considered deterministic rules [Stone 1977,
1978; Bokhari 1979]. More recently, Tantawi and Towsley
[1985] have developed a technique to find optimal
probabilistic rules. The principal advantage of static
policies is their simplicity: there is no need to
maintain and process system state information. Adaptive
policies, by contrast, are more complex, since they
employ information on the current system state in
making transfer decisions. This information makes
possible significantly greater performance benefits
than can be achieved under static policies. This
potential was clearly indicated by Livny and Melman
[1982], who showed that in a network of homogeneous,
autonomous nodes there is a high probability that at
least one node is idle while tasks are queued at some
other node, over a wide range of network sizes and
average node utilizations. In previous work [Eager,
Lazowska \& Zahorjan 1984] we considered the
appropriate level of complexity for adaptive load
sharing policies. (For example, how much system state
information should be collected, and how should it be
used in making transfer decisions?) Rather than
advocating specific policies, we considered fairly
abstract strategies exhibiting various levels of
complexity. We demonstrated that the potential of
adaptive load sharing can in fact be realized by quite
simple strategies that use only small amounts of
system state information. This result is important
because of a number of practical concerns regarding
complex policies: the effect of the overhead required
to administer a complex policy, the effect of the
inevitable inaccuracies in detailed information about
system state and workload characteristics, and the
potential for instability. (We consciously use the
phrase ``load sharing'' rather than the more common
``load balancing'' to highlight the fact that load
balancing, with its implication of attempting to
equalize queue lengths system-wide, is not an
appropriate objective.) Adaptive load sharing policies
can employ either centralized or distributed control.
Distributed control strategies can be of two basic
types (although intermediate strategies also are
conceivable): sender-initiated (in which congested
nodes search for lightly loaded nodes to which work may
be transferred), and receiver-initiated (in which
lightly loaded nodes search for congested nodes from
which work may be transferred). Our earlier paper
considered distributed, sender-initiated policies --- a
sufficiently rich class to allow us to answer the
fundamental questions of policy complexity that we were
addressing. In the course of understanding the reasons
for the degradation of these policies at high system
loads, we were led to consider receiver-initiated
policies as a possible alternative. The comparison of
receiver-initiated and sender-initiated adaptive load
sharing is the purpose of the present paper. There have
been several experimental studies, using prototypes and
simulation models, of specific (typically fairly
complex) adaptive load sharing policies [Bryant \&
Finkel 1981; Livny \& Melman 1982; Kreuger \& Finkel
1984; Barak \& Shiloh 1984]. Both sender-initiated
policies and receiver-initiated policies have been
considered. However, there has not previously been a
rigorous comparison of these two strategies. Such a
comparison is made difficult by the problem of choosing
appropriate representative policies of each type, and
by the potentially quite different costs incurred in
effecting transfers. (Receiver-initiated policies
typically will require the transfer of executing tasks,
which incurs substantial costs in most systems [Powell
\& Miller 1983]. Sender-initiated policies naturally
avoid such costly transfers, since tasks can be
transferred upon arrival, prior to beginning
execution.) Our present paper is similar to our
previous work in that our purpose, rather than to
advocate specific policies, is to address a fundamental
question concerning policies in general: How should
system state information be collected and load sharing
actions initiated --- by potential receivers of work,
or by potential senders of work? In studying this
question we consider a set of abstract policies that
represent only the essential aspects of
receiver-initiated and sender-initiated load sharing
strategies. These policies are investigated using
simple analytic models. Our objective is not to
determine the absolute performance of particular load
sharing policies, but rather to gain intuition
regarding the relative merits of the different
approaches under consideration. We represent locally
distributed systems as collections of identical nodes,
each consisting of a single processor. The nodes are
connected by a local area network (e.g., an Ethernet).
All nodes are subjected to the same average arrival
rate of tasks, which are of a single type. In contrast
to most previous papers on load sharing, we represent
the cost of task transfer as a processor cost rather
than as a communication network cost. It is clear from
measurement and analysis [Lazowska et al. 1984] that
the processor costs of packaging data for transmission
and unpackaging it upon reception far outweigh the
communication network costs of transmitting the data.
We study three abstract load sharing policies,
comparing their performance to each other and to that
of a system in which there is no load sharing. The
Sender policy is used as a representative of
sender-initiated load sharing strategies. The Receiver
and Reservation policies are used as representatives of
receiver-initiated load sharing strategies; unlike the
Receiver policy, the Reservation policy will transfer
only newly arriving tasks. In a bit more detail: Sender
In our earlier work concerning the appropriate level of
complexity for adaptive load sharing schemes, we
identified two sub-policies of sender-initiated
strategies. The transfer policy determines whether a
task should be processed locally or remotely. The
location policy determines to which node a task
selected for transfer should be sent. In that previous
study, we considered threshold transfer policies, in
which each node uses only local state information. An
attempt is made to transfer a task originating at a
node if and only if the number of tasks already in
service or waiting for service (the node queue length)
is greater than or equal to some threshold T. We
considered various location policies spanning a range
of complexity. We found that the use of a complex
location policy yields only slight improvement over the
use of a simple location policy that, like the transfer
policy, uses threshold information. In this threshold
location policy, a node is selected at random and
probed to determine whether the transfer of a task to
that node would place the node above the threshold T.
If not, then the task is transferred. If so, then
another node is selected at random and probed in the
same manner. This continues until either a suitable
destination node is found, or the number of probes
reaches a static probe limit, Lp. In the latter case,
the originating node must process the task. (The use of
probing with a fixed limit, rather than broadcast,
ensures that the cost of executing the load sharing
policy will not be prohibitive even in large networks.
The performance of this policy was found to be
surprisingly insensitive to the choice of probe limit:
the performance with a small probe limit, e.g., 3 or 5,
is nearly as good as the performance with a large probe
limit, e.g., 20.) The sender-initiated policy with a
threshold transfer policy and a threshold location
policy was found to yield performance not far from
optimal, particularly at light to moderate system
loads. For this reason, and because of its simplicity,
we choose this policy to serve as the representative of
sender-initiated strategies for the comparison that is
the subject of the present paper, and term it here the
Sender policy. Receiver To facilitate comparison
between sender-initiated strategies and
receiver-initiated strategies, a representative policy
of the latter class should be as similar as possible to
the Sender policy. In particular, it should utilize
threshold-type state information, and have a bound Lp
on the number of remote nodes whose state can be
examined when making a task transfer decision. In the
Receiver policy, a node attempts to replace a task that
has completed processing if there are less than $T$
tasks remaining at the node. A remote node is selected
at random and probed to determine whether the transfer
of a task from that node would place its queue length
below the threshold value T. If not, and if the node is
not already in the process of transferring a task, a
task is transferred to the node initiating the probe.
Otherwise, another node is selected at random and
probed in the same manner. This continues until either
a node is found from which a task can be obtained, or
the number of probes reaches a static probe limit, Lp.
In the latter case, the node must wait until another
task departs before possibly attempting again to
initiate a transfer. (This is completely analogous to
the operation of the Sender policy, in which a node
that fails to find a suitable destination to which to
transfer a task must wait until another task arrives
before attempting again to initiate a transfer.) The
Receiver policy with T=1 has been studied using a
simulation model by Livny and Melman [1982], who term
it the ``poll when idle algorithm''. Reservation The
Reservation policy, like the Sender policy but in
contrast to the Receiver policy, will only transfer
newly arriving tasks. This may be advantageous in
multiprogramming systems in which nodes attempt to give
each of the tasks present some share of the total
available processing power. If the Receiver policy is
used in such a system, almost all task transfers will
involve executing tasks, and may be substantially more
costly than transfers of non-executing tasks. In the
Reservation policy, as in the Receiver policy, a node
attempts to replace a task that has completed
processing if there are less than $T$ tasks remaining
at the node. A remote node is selected at random and
probed to determine whether the transfer of the next
task to originate at that node would place its queue
length below the threshold value T. If not, and if no
other ``reservation'' is pending for this node, then
this next arrival is ``reserved'' by the probing node;
it is transferred upon arrival if no other tasks have
arrived at the probing node by that time. If the
reservation attempt is not successful, another node is
selected at random and probed in the same manner. This
continues until either a node is found at which the
next arrival can be reserved, or the number of probes
reaches a static probe limit, Lp. In the latter case,
the node must wait until another task departs before
possibly attempting again to reserve a task. Our
evaluation of this policy is optimistic. (Even this
optimistic evaluation predicts unsatisfactory
performance.) At the time a reservation is attempted,
we assume that the probed node can ``see into the
future'' to the arrival time of the (potentially)
reserved task. The reservation is made only if the
probed node will be above threshold at that time. Also,
even when a reservation request is successful, the
probed node considers this next arrival as ineligible
for other reservation requests only if it will actually
be transferred to the node holding the reservation.
Finally, we assume that the probability that a task
will be processed locally rather than transferred,
given that it arrives when the node queue length is at
or over threshold, is independent of the prior history
of the task arrivals and departures. In fact, this
probability is higher for tasks with shorter
interarrival times. Many of the results of our study
are illustrated in the accompanying figure. While the
figure illustrates specific choices of parameter
values, the results are quite robust with respect to
these choices; a substantial part of the full paper is
devoted to demonstrating this robustness. The results
include: Both receiver-initiated and sender-initiated
policies offer substantial performance advantages over
the situation in which no load sharing is attempted
(shown as M/M/1 in the figure). Sender-initiated
policies are preferable to receiver-initiated policies
at light to moderate system loads. Receiver-initiated
policies are preferable at high system loads, but only
if the costs of task transfer under the two strategies
are comparable. If the cost of task transfers under
receiver-initiated policies is significantly greater
than under sender-initiated policies (for example,
because executing tasks must be transferred), then
sender-initiated policies provide uniformly better
performance. Modifying receiver-initiated policies to
transfer only newly-arrived tasks (so as to avoid the
cost of transferring executing tasks) yields
unsatisfactory performances.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelernter:1985:ACP,
author = "David Gelernter and Sunil Podar and Hussein G. Badr",
title = "An adaptive communications protocol for network
computers (extended abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "4--5",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A network computer is a collection of computers
designed to function as one machine. On a network
computer, as opposed to a multiprocessor, constituent
subcomputers are memory-disjoint and communicate only
by some form of message exchange. Ensemble
architectures like multiprocessors and network
computers are of growing interest because of their
capacity to support parallel programs, where a parallel
program is one that is made up of many
simultaneously-active, communicating processes.
Parallel programs should, on an appropriate
architecture, run faster than sequential programs, and,
indeed, good speed-ups have been reported in parallel
programming experiments in several domains, amongst
which are AI, numerical problems, and system
simulation. Our interest lies in network computers,
particularly ones that range in size from several
hundred nodes to several thousand. Network computers
may be organized in either of two basic ways: their
nodes may communicate over a shared bus (or series of
buses), as in S/Net; or over point-to-point links, as
in Cosmic Cube and the Transputer Network. The work to
be presented deals with the point-to-point class, the
elements of which we shall refer to as ``linked
networks''. Linked networks face a fundamental
communication problem. Unless they are completely
connected (which is rarely possible), two communicating
nodes will not necessarily be connected by a single
link. Messages between nodes must therefore, in
general, travel over several links and be processed by
several intermediate nodes. Communication delays
increase with the length of the traveled path. Network
computer designers therefore provide networks the
diameters of which are small relative to their size,
and network operating systems will attempt to place
communicating processes as close to each other as
possible. We present a communication protocol for
linked networks that was designed specifically for
network computers. Staged Circuit Switching is a
communication protocol that combines aspects of
store-and-forwarding with aspects of circuit switching,
where circuit switching refers to the class of
protocols in which a communicating source and
destination first construct a dedicated path or circuit
between them, then communicate directly over this path.
The path may be a physical connection, as in
space-switched circuit-switching, or a series of
dedicated slots in time-division multiplexing switches,
as in time-switching protocols. The
staged-circuit-switching design is strongly related to
space-switched circuit-switching and encompasses both
the protocol itself and a communication architecture to
support it. In staged circuit switching, each message
constructs for itself the longest physical circuit that
it can without waiting for links. When a message is to
be sent, a header that records the message's source and
destination is sent propagating through the network
towards the destination node; the header seizes each
free link along its path and incorporates it into a
growing circuit. When it meets a busy link, or arrives
at its destination, circuit building stops, the
message's data portion is transmitted and acknowledged
over the existing circuit, and the circuit is released.
A message that has not arrived at its destination then
gathers itself together and plunges onward in the same
fashion. In an empty network then, staged circuit
switching is the same as circuit switching: each
message is transmitted over a direct circuit from
source to destination. In a heavily loaded network, it
is the same as store-and-forwarding: each next-link is
busy, each circuit is therefore only one link long, and
the message proceeds hop by hop. The protocol combines
the speed benefits of circuit switching at light
traffic loads, with the high bandwidth advantages of
store-and-forwarding at heavy loads. We have carried
out extensive simulation studies to evaluate the
dynamics of staged circuit switching from the point of
view of message delays, throughput, circuit lengths,
efficiency, implementation, and so on. The studies were
implemented in the context of a toroidal topology of
diameter 32, yielding a 1024-node network. Uniform
source-to-destination distributions were used. Both the
topology and the source-to-destination distributions
are analyzed. An analysis of network saturation based
on mean values is also given. Staged circuit switching
unambiguously emerges as a strong protocol with
performance characteristics superior to either
classical store-and-forwarding or circuit switching,
particularly with regards to adaptability to varying
network loads and in providing a consistently high
effective network bandwidth. On the basis of our
results the protocol is proposed as a suitable
candidate for linked networks. Its attractiveness is
further enhanced by its potential ability to
continually reconfigure the network dynamically at
runtime to optimize for observed traffic patterns.
Heavily-used circuits may be left in place over longer
periods than a single message transmission. In this
way, the system constantly rearranges the network
topology in order to bring heavily-communicating
distant nodes closer together, thereby acting as a
``communication cache''. A ``cache hit'' would
correspond to finding the desired destination node one
hop away from a given source. Effective exploitation of
this capability is the subject of ongoing research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1985:ADC,
author = "Erol Gelenbe and David Finkel and Satish K. Tripathi",
title = "On the availability of a distributed computer system
with failing components",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "6--13",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317804",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a model for distributed systems with
failing components. Each node may fail and during its
recovery the load is distributed to other nodes that
are operational. The model assumes periodic
checkpointing for error recovery and testing of the
status of other nodes for the distribution of load. We
consider the availability of a node, which is the
proportion of time a node is available for processing,
as the performance measure. A methodology for
optimizing the availability of a node with respect to
the checkpointing and testing intervals is given. A
decomposition approach that uses the steady-state flow
balance condition to estimate the load at a node is
proposed. Numerical examples are presented to
demonstrate the usefulness of the technique. For the
case in which all nodes are identical, closed form
solutions are obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Conway:1985:RNE,
author = "A. E. Conway and N. D. Georganas",
title = "{RECAL} --- a new efficient algorithm for the exact
analysis of multiple-chain closed queueing networks
(abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "14--14",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "RECAL, a Recursion by Chain Algorithm for computing
the mean performance measures of product-form
multiple-chain closed queueing networks, is presented.
It is based on a new recursive expression which relates
the normalization constant of a network with $r$ closed
routing chains to those of a set of networks having $
(r - 1)$ chains. It relies on the artifice of breaking
down each chain into constituent sub-chains that each
have a population of one. The time and space
requirements of the algorithm are shown to be
polynomial in the number of chains. When the network
contains many routing chains the proposed algorithm is
substantially more efficient than the convolution or
mean value analysis algorithms. The algorithm therefore
extends the range of queueing networks which can be
analyzed efficiently by exact means. A numerical
example is given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balbo:1985:MPS,
author = "G. Balbo and S. C. Bruell and S. Ghanta",
title = "Modeling priority schemes",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "15--26",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317806",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop Generalized Stochastic Petri Net models for
several priority queueing disciplines. The building
blocks of these models are explained and many variants
are easily derivable from them. We then combine these
building blocks with product-form queueing network
models. Numerical results are provided that illustrate
the effectiveness of the method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "generalized stochastic Petri nets; head-of-the-line;
preemptive resume; priorities; product-form queueing
networks; reorientation; time-out",
}
@Article{Walstra:1985:NNQ,
author = "Robbe J. Walstra",
title = "Nonexponential networks of queues: a maximum entropy
analysis",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "27--37",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317807",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We will propose a new, iterative method for
approximately analyzing closed networks of queues with
nonexponential service time distributions and FCFS
scheduling. Our method is based on the Principle of
Maximum Entropy and produces results which, first, are
consistent with the fundamental Work Rate Theorem and,
second, are exact for separable networks of queues.
Considering accuracy and execution time
characteristics, our method offers a viable alternative
to Marie's homogeneous approximation method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Calzarossa:1985:SSC,
author = "Maria Calzarossa and Domenico Ferrari",
title = "A sensitivity study of the clustering approach to
workload modeling (extended abstract)",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "38--39",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317808",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a paper published in 1984 [Ferr84], the validity of
applying clustering techniques to the design of an
executable model for an interactive workload was
discussed. The following assumptions, intended not to
be necessarily realistic but to provide sufficient
conditions for the applicability of clustering
techniques, were made: The system whose workload is to
be modeled is an interactive system, and its
performance can be accurately evaluated by solving a
product-form closed queueing network model. The
behavior of each interactive user can be adequately
modeled by a probabilistic graph (called a user
behavior graph); in such a graph, each node represents
an interactive command type, and the duration of a
user's stay in the node probabilistically equals the
time the user spends typing in a command of that type,
waiting for the system's response, and thinking about
what command should be input next. The interactive
workload to be modeled is stationary, and the workload
model to be constructed is intended to reproduce its
global characteristics (not those of some brief excerpt
from it exhibiting peculiar dynamics), hence to be
stationary as well. It was shown in [Ferr84] that,
under these assumptions, clustering command types
having the same probabilistic resource demands does not
affect the values of the performance indices the
evaluators are usually interested in, provided the
visit ratio to each node in the reduced (i.e.,
post-clustering) user behavior graph is equal to the
sum of the visit ratios the cluster's components had in
the original graph. Since the reduction we have just
described is equivalent to replacing each cluster with
one or more representatives of its components, and
since this is also the goal of applying clustering
techniques to the construction of executable workload
models substantially more compact than the original
workload to be modeled, this result shows that such
techniques are valid (i.e., produce accurate models)
when the assumptions and the conditions mentioned above
are satisfied. One condition which in practice is never
satisfied, however, is that the clustered commands are
characterized by exactly the same resource demands. In
fact, clustering algorithms are non-trivial just
because they have to recognize ``nearness'' among
commands with different characteristics, and group
those and only those commands whose resource demands
are sufficiently similar (where the notion of
similarity is to be defined by introducing that of
distance between two commands). Thus, the question of
the sensitivity of a workload model's accuracy to the
inevitable dispersion of the characteristics of a
cluster's components immediately arises. We know that,
if an adequate product-form model of an interactive
system can be built, if the users' behaviors can be
accurately modeled by probabilistic graphs, and if the
workload and the model of it to be constructed are
stationary, then a workload model in which all commands
with identical characteristics are grouped together and
modeled by a single representative is an accurate model
of the given workload (i.e., the model produces the
same values of the performance indices of interest as
the modeled workload when it is processed by a given
system). This is true, of course, provided the visit
ratios of the workload model's components equal the
sums of those of the corresponding workload components.
If we now apply a clustering algorithm to the given
workload, thereby obtaining clusters of similar, but
not identical, commands, and we build a workload model
by assembling cluster representatives (usually one per
cluster, for instance with demands corresponding to
those of the cluster's center of mass), by how much
will the values of the performance indices produced by
the workload model running on the given system differ
from those produced by the workload to be modeled? As
with several other problems, this could be attacked by
a mathematical approach or by an experimental one.
While a successful mathematical analysis of the
sensitivity of the major indices to the dispersion in
the resource demands of the commands being clustered
together would provide more general results, it would
also be likely to require the introduction of
simplifying assumptions (for example, having to do with
the distributions of the resource demands in a cluster
around its center of mass) whose validity would be
neither self-evident nor easy to verify experimentally.
On the other hand, an experimental approach achieves
results which, strictly speaking, are only applicable
to the cases considered in the experiments.
Extrapolations to other systems, other workloads, other
environments usually require faith, along with
experience, common sense, and familiarity with real
systems and workloads. This inherent lack of generality
is somehow counterbalanced, however, by the higher
degree of realism that is achievable with an
experimental investigation. In particular, when in a
study the properties of workloads are to play a crucial
role (there are very few studies indeed in which this
is not the case!), using a mathematical approach is
bound to raise about such properties questions that are
either very difficult or impossible to answer.
Primarily for this reason, and knowing very well the
limitations in the applicability of the results we
would obtain, we decided to adopt an experimental
approach. Since the question we were confronted with
had never been answered before (nor, to our knowledge,
had it been asked), we felt that our choice was
justified by the exploratory nature of the study. If
the resulting sensitivity were to turn out to be high,
we could conclude that not even under the above
assumptions can clustering techniques be trusted to
provide reasonable accuracy in all cases and hence
should not be used, or used with caution in those cases
(if they exist) in which their accuracy might be
acceptable. If, on the other hand, the sensitivity were low,
then we could say that, in at least one practical case,
clustering techniques would have been shown to work
adequately (of course, under all the other assumptions
listed above). The rationale of this investigation
might be questioned by asking why it would not be more
convenient to test the validity of clustering
techniques directly, that is, by comparing the
performance indices produced by a real workload to
those produced by an executable model (artificial
workload) built according to a clustering technique.
Our answer is that, in this study as well as in
[Ferr84], we are more interested in understanding the
limitations and the implications of clustering and
other workload model design methods than in evaluating
the accuracy of clustering in a particular case. In
other words, we are not so much keen on finding out
whether the errors due to clustering are of the order
of 10\% or of 80\%, but we want to be able to
understand why they are only 10\% or as large as 80\%,
respectively. Thus, we need to decompose the total
error into the contributions to it of the various
discrepancies that any real situation exhibits with
respect to the ideal one. This paper describes a study
primarily performed to assess the magnitude of one such
contribution, that of the dispersion of the resource
demands of clustered commands. An experimental
approach, in the case being considered here, requires
first of all that a workload for the experiment be
selected. Then, that workload is to be measured, in
order to obtain the values of the parameters defined by
the desired characterization. Next, an executable
workload model is to be built by applying a clustering
technique to the real workload selected. Then, the
workload and its model are to be run on the same
system, so that the model's accuracy can be evaluated
by comparing the performance indices produced by them.
As our study is to try to isolate the sensitivity of
that accuracy to the differences in demands among the
commands that have been grouped into the same cluster,
these differences must be made the only source of
inaccuracies in the performance produced by the model.
To isolate this contribution to the error from all of
the others, the latter sources should be eliminated.
Finally, the experiment is to be carried out, and its
results interpreted. The results show that, on the
whole, the clustering method for workload model design
is reasonably accurate in the context of the case
examined in our study. The sensitivities we found were
reasonably low. Thus, we can state that, in at least
one practical case and under the assumptions discussed
in this paper, clustering techniques for executable
workload model design have been shown to work well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raghavan:1985:CIU,
author = "S. V. Raghavan and R. Kalyanakrishnan",
title = "On the classification of interactive user behaviour
indices",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "40--48",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317809",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concepts of user behaviour entropy and user
behaviour mobility are proposed as indices for the
description of user behaviour. The user behaviour
indices are derivable from the mode probability vector
and the mode transition matrix which adequately
describe the behaviour dynamics of an interactive user.
The user behaviour indices reduce the ((n*n)+n)
dimensional parameter space to two dimensions only for
classification, without loss of information related to
the user behaviour dynamics. The classification of the
users in an interactive educational environment using
the user behaviour indices is presented as a case
study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Verkamo:1985:ERL,
author = "A. Inkeri Verkamo",
title = "Empirical results on locality in database
referencing",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "49--58",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317810",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Database referencing behaviour is analyzed with
respect to locality features. The analysis is based on
database reference strings collected from several runs
of typical batch programs accessing a real database.
Locality of reference is measured by the stack distance
probability distribution, the number of block faults,
and a locality measure based on the memory reservation
size. In all the experiments, locality of reference is
observed, but it is found to be weaker than in code
referencing or even in some previous studies on
database referencing. The phase/transition concept used
in virtual memory systems is not well applicable to
database referencing, since a large part of the
locality set is constantly changing. The disruption of
the phases is predominantly due to random referencing
of data blocks. The references to index blocks show
stronger locality. In some special cases, sequentiality
is observed in the use of the data blocks. In general,
neither replacement strategies developed for virtual
memory systems nor prefetching techniques seem adequate
for performance improvement of database referencing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Khelalfa:1985:DCS,
author =        "Halim M. Khelalfa and Anneliese K. von Mayrhauser",
title = "Degradable computer systems with dependent
subsystems",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "59--68",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317811",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When building a model for degradable computer systems,
it is not sufficient to merely quantify reliability and
performance measures. These indices must be
mathematically sound if they are to be used to design
such systems in an optimal way. The paper presents an
analysis of design optimisation for degradable computer
systems and shows how this particular application leads
to a system model with interdependent subsystems. A
procedure is presented on how to solve the resulting
Markov model. Its computational complexity is compared
to another solution method and shown to be largely more
efficient.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chillarege:1985:ESW,
author =        "Ram Chillarege and Ravishankar K. Iyer",
title = "The effect of system workload on error latency: an
experimental study",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "69--77",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317812",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, a methodology for determining and
characterizing error latency is developed. The method
is based on real workload data, gathered by an
experiment instrumented on a VAX 11/780 during the
normal workload cycle of the installation. This is the
first attempt at jointly studying error latency and
workload variations in a full production system.
Distributions of error latency were generated by
simulating the occurrence of faults under varying
workload conditions. A family of error latency
distributions so generated illustrate that error
latency is not so much a function of when in time a
fault occurred but rather a function of the workload
that followed the failure. The study finds that the
mean error latency varies by a 1 to 8 (hours) ratio
between high and low workloads. The method is general
and can be applied to any system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gonsalves:1985:PCT,
author = "Timothy A. Gonsalves",
title = "Performance characteristics of two {Ethernets}: an
experimental study",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "78--86",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317813",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Local computer networks are increasing in popularity
for the interconnection of computers for a variety of
applications. One such network that has been
implemented on a large scale is the Ethernet. This
paper describes an experimental performance evaluation
of a 3 and a 10 Mb/s Ethernet. The effects of varying
packet length and transmission speed on throughput,
mean delay and delay distribution are quantified. The
protocols are seen to be fair and stable. These
measurements span the range from the region of high
performance of the CSMA/CD protocol to the upper limits
of its utility where performance is degraded. The
measurements are compared to the predictions of
existing analytical models. The correlation is found to
range from good to poor, with more sophisticated models
yielding better results than a simple one.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chlamtac:1985:PIS,
author = "I. Chlamtac and M. Eisinger",
title = "Performance of integrated services (voice\slash data)
{CSMA\slash CD} networks",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "87--93",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317814",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a voice/data integrated local area
communication system. Due to the high suitability of
CSMA/CD protocols for data communication and the
existence of real time voice delay constraints we
consider a hybrid TDM/CSMA/CD protocol. This model
fundamentally differs from the very well documented
voice/data integrated systems in point to point
networks in which both voice and data users are
assigned fixed duration time slots for transmission.
The TDM/CSMA/CD integrated system performance is
analysed and basic performance tradeoffs in the system
design are manifested.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chlamtac:1985:AMH,
author = "I. Chlamtac and M. Eisinger",
title = "An analytic model of the hyperchannel network using
multiple channel architecture",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "94--104",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317815",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The HYPERchannel communication network configured
around one to four channels is considered. We develop a
queueing model which characterizes the network
performance as a function of the number of channels,
the channel load and the number of stations in the
network. The model is used to analyze the multichannel
system performance and to evaluate the effect of the
channel selection mechanism, as implemented by the
HYPERchannel station interface units, on the
performance. It is shown that the network bandwidth
utilization is directly related to the channel
selection process and that it varies with network
configuration and load. These observed relations are
especially significant since they are most pronounced
in networks with small number of stations, the typical
configuration in the majority of operational
HYPERchannel networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bleistein:1985:APM,
author = "Sandra Bleistein and Shin-Sun Cho and Robert T.
Goettge",
title = "Analytic performance model of the {U.S.} en route air
traffic control computer systems",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "105--115",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An analytic performance modeling case study of a
complex command and control computer system is
presented. A queueing network model of the system was
developed and validated. Features of the model found to
be critical to its accuracy were detailed software
models, general service time distributions, and models
of transient response time behavior. Response time
prediction accuracy of the model was validated to 20
percent for moderate device utilizations. The study
shows that analytic techniques can be successfully
applied to performance modeling of complex systems.
Prediction of response time percentile values and
modeling of transient effects are identified as two
areas where improved analytic techniques would enhance
performance engineering of such systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dowdy:1985:AUM,
author = "Lawrence W. Dowdy and Manvinder S. Chopra",
title = "On the applicability of using multiprogramming level
distributions",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "116--127",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317817",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A computer system's workload is represented by its
multiprogramming level, which is defined as the number
of tasks (jobs, customers) which actively compete for
resources within the system. In a product-form queuing
network model of the system, the workload is modeled by
assuming that the multiprogramming level is either
fixed (i.e., closed model) or that the multiprogramming
level depends upon an outside arrival process (i.e.,
open model). However, in many actual systems, closed
and open models are both inappropriate since the
multiprogramming level is neither fixed nor governed by
an outside arrival process. In an actual system, the
multiprogramming level varies due to features such as
task spawning, killing, blocking, parallel processing,
and/or simultaneous resource possession. The
multiprogramming level is a random variable with an
associated distribution. This paper demonstrates that
improved models can result from using this
multiprogramming level distribution information.
Several examples relative to open versus closed models,
subsystem models, actual system models, and blocking
models are given which demonstrate the applicability of
using multiprogramming level distributions. This
applicability, shown via the examples, is the main
contribution of the paper. The examples also motivate
interesting theoretical results relating to open
models, closed models, and subsystem models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "blocking; multiprogramming level distributions; open
and closed queuing networks; subsystem modeling",
}
@Article{Krzesinski:1985:MQN,
author = "A. E. Krzesinski and P. Teunissen",
title = "Multiclass queueing networks with population
constrained subnetworks",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "128--139",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317818",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A Multiclass Queueing Network model (MQN) is
partitioned into a set of disjoint subnetworks.
Population constraints are applied to each subnetwork
such that within each subnetwork each population chain
is either subject to an individual population
constraint, or a group of chains may be subject to a
common (shared) population constraint. Such population
constraints are necessary in order to model
multiprogramming level constraints in mainframe
computer systems and window flow control mechanisms in
computer communication networks. A computationally
efficient approximate solution method is developed for
solving MQN's with population constraints. Each
subnetwork is reduced to a single approximately flow
equivalent composite centre by assuming that the effect
of other chains on a given chain can be adequately
represented by their average customer populations. The
accuracy of the population constraint approximation is
compared against previous techniques by applying it to
a set of test cases for which simulation solutions have
previously been reported. The accuracy of the
approximation technique is found to be good and in
general is an improvement over previously published
concurrency constraint approximations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solution; mean value analysis; multiclass
queueing networks; product form solutions",
}
@Article{Branwajn:1985:NSI,
author = "Alexandre Branwajn and Yung-Li Lily Jow",
title = "A note on service interruptions",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "140--148",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317986",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This note is devoted to a few remarks on the
performance evaluation of systems with service
interruptions such as priority queues for lower
priority customers, systems subject to breakdowns, etc.
Recent work on priority queues has shown that a popular
approximation method, the ``reduced occupancy
approximation'', can be exceedingly inaccurate for a
range of parameter values. We identify a cause of
inaccuracy and, hence, propose a simple correction that
provides a substantial improvement in the results.
Using the example of a simple model with service
interruptions, we show also that conditional
probabilities can be of value in deriving recurrent
solutions to some problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
xxnote = "Check: author may be Brandwajn??",
}
@Article{Plateau:1985:SSP,
author = "Brigitte Plateau",
title = "On the stochastic structure of parallelism and
synchronization models for distributed algorithms",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "147--154",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317819",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper a new technique to handle complex Markov
models is presented. This method is based on a
description using stochastic automatas and is dedicated
to distributed algorithms modelling. One example of a
mutual exclusion algorithm in a distributed environment
is extensively analysed. The mathematical analysis is
based on tensor algebra for matrices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snyder:1985:ANS,
author = "Patricia M. Snyder and William J. Stewart",
title = "An approximate numerical solution for multiclass
preemptive priority queues with general service time
distributions",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "155--165",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317820",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper an approximate numerical solution for a
multiclass preemptive priority single server queue is
developed. The arrival process of each class follows a
Poisson distribution. The service time distribution
must have a rational Laplace transform, but is
otherwise arbitrary and may be different for different
classes. The work reported here was motivated by a
desire to compute the equilibrium probability
distribution of networks containing preemptive priority
servers. Such networks are frequently encountered when
modeling computer systems, medical care delivery
systems and communication networks. We wish to use an
iterative technique which constructs a series of two
station networks consisting of one station from the
original network and one ``complementary'' station
whose behavior with respect to the original station
mimics that of the rest of the network. At each
iteration, it is necessary to compute the equilibrium
probability distribution of one or more preemptive
priority queues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hevner:1985:EOD,
author = "Alan R. Hevner",
title = "Evaluation of optical disk systems for very large
database applications",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "166--172",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optical Disk Systems have significant advantages over
conventional magnetic mass storage media for very large
database applications. Among other features, optical
disk systems offer large capacity and high transfer
rate. A critical problem is how to integrate the
optical disk system into a total application system
environment while maintaining the high performance
capabilities of the optical disk. In this paper the
performance of optical disk system configurations under
realistic application environments is analyzed via
queueing models. The results provide several important
guidelines for the use of optical disk systems on large
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Houtekamer:1985:LDC,
author = "Gilbert E. Houtekamer",
title = "The local disk controller",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "173--182",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317822",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of the I/O subsystem in the 370-XA
architecture has been improved considerably with the
introduction of the new channel subsystem, as compared
to the System/370 architecture. The emphasis in the
370-XA architecture is on reducing the CPU load
associated with I/O, and on reducing the congestion in
multi-CPU, shared systems, by redesigning the channel
system. In this paper we will show that a reallocation
of the control unit logic may triple the channel
subsystem's capacity, while still using the same disk
drives. The performance gain is achieved by adding
control-unit like intelligence and local buffer memory
to each disk drive, creating a Local Disk Controller
(LDC), and thus eliminating the performance degradation
caused by reconnect failures at a high channel
utilization. The system proposed remains fully software
compatible with the current 370-XA architecture. A
simpler approach, requiring only a slight modification
to the disk drives, is also discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:1985:MCC,
author = "Philip S. Yu and Daniel M. Dias and John T. Robinson
and Balakrishna R. Iyer and Douglas Cornell",
title = "Modelling of centralized concurrency control in a
multi-system environment",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "183--191",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317795.317823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of multiple systems sharing a common
data base is analyzed for an architecture with
concurrency control using a centralized lock engine.
The workload is based on traces from large mainframe
systems running IBM's IMS database management system.
Based on IMS lock traces the lock contention
probability and data base buffer invalidation effect in
a multi-system environment is predicted. Workload
parameters are generated for use in event-driven
simulation models that examine the overall performance
of multi-system data sharing, and to determine the
performance impact of various system parameters and
design alternatives. While performance results are
presented for realistic system parameters, the emphasis
is on the methodology, approximate analysis technique
and on examining the factors that affect multi-system
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1985:ASO,
author = "Alexander Thomasian and In Kyung Ryu",
title = "Analysis of some optimistic concurrency control
schemes based on certification",
journal = j-SIGMETRICS,
volume = "13",
number = "2",
pages = "192--203",
month = aug,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/317786.317824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:01:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optimistic Concurrency Control-OCC schemes based on
certification are analyzed in this paper. We allow two
types of data access schemes referred to as static and
dynamic. According to the first (second) scheme a
transaction reads all the required data items at the
beginning of its processing (on demand during its
processing), respectively. After completing its
processing, each transaction is checked as to whether
it has encountered a data conflict. Validated
transactions commit; otherwise, they are restarted. A
variant of the regular (silent) commit scheme where a
committing transaction notifies conflicted transactions
to restart immediately (broadcast commit scheme) is
also considered. We use an iterative method to analyze
the performance of OCC schemes in the framework of a
system with a fixed number of transactions in multiple
classes with given probabilities for their occurrence.
The iterative method is validated against simulation
and shown to be highly accurate even for high data
contention. We present graphs/tables, which are used to
determine how system performance is affected by: (i)
various OCC schemes, (ii) transaction size, i.e.,
number of data items accessed, (iii) number of
transactions, (iv) the distribution of transaction
processing time requirements, (v) the throughput
characteristic of the system, and (vi) granule
placement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ryu:1985:RPA,
author = "In Kyung Ryu",
title =         "Review of {`OS 1100 performance algorithms: a guide
to the resource allocation algorithms of OS-1100'} by
{John C. Kelly}",
journal = j-SIGMETRICS,
volume = "13",
number = "3--4",
pages = "9--9",
month = nov,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041844.1041845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The book describes the algorithms which were used by
OS-1100 to manage the resources of Sperry 1100 computer
systems, and lists the parameters that may affect the
performance of OS-1100. However, the book fails in
providing the reader how the algorithms and the
parameters affect the performance of OS-1100. It is not
clear to the reader why the algorithm in OS-1100 was
selected and how to tune the parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perros:1985:AMF,
author = "H. G. Perros and D. Mirchandani",
title = "An analytic model of a file server for bulk file
transfers",
journal = j-SIGMETRICS,
volume = "13",
number = "3--4",
pages = "14--22",
month = nov,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041844.1041846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An analytic model of a file server is presented. The
file server was an experimental system designed to
provide an environment for storage and retrieval of
bulk files. The file server was envisaged to be
accessed by single-user workstations, equipped with
limited secondary storage, via a local area network.
The analytic model is a hierarchical model involving an
open/closed queueing network of the BCMP type and an
open queueing network with blocking. These two models
were combined together through the means of an
iterative scheme. The results obtained from the
analytic model were in close agreement with simulation
data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Domanski:1985:BIS,
author = "Bernard Domanski",
title = "Building {IMS} synthetic workloads",
journal = j-SIGMETRICS,
volume = "13",
number = "3--4",
pages = "23--28",
month = nov,
year = "1985",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041844.1041847",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Historically, workload characterization, and cluster
analysis in particular, has been a proven technique
when applied to performance evaluation / capacity
planning studies. Given the problem of constructing a
synthetic workload that represents a production
workload, our goal is to use this technique to identify
a {\em concise}, yet accurate set of work units that
will compose the workload. For IMS, these work units
are transactions. Yet the selection of transactions
must be done with care; for an additional goal must be
to identify a {\em concise}, yet accurate set of
databases that are required by the transactions. This
paper will review clustering techniques, and apply them
to drive the transaction selection process. An
algorithm is also presented that identifies the
technique behind database selection. A case study
follows that illustrates the implementation of the
methodology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buzen:1986:MST,
author = "Jeffrey P. Buzen",
title = "Modeling {I/O} subsystems (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "1--1",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial will present techniques for modeling the
performance of I/O subsystems that incorporate
channels, control units, string controllers and direct
access devices. The presentation will focus on the
general principles involved in analyses of this type,
and will explore the strengths and weaknesses of
alternative assumptions. Attendees should gain an
overall understanding of basic analysis procedures so
they can deal with alternative I/O architectures that
are not treated explicitly in the presentation. The
material in this tutorial is mathematically oriented,
and attendees should have some familiarity with basic
queueing theory. However, the presentation is almost
entirely self contained, and all important concepts and
equations will be fully explained. Operational analysis
will be used throughout to simplify the derivation of
major results and clarify the assumptions required at
each stage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferrari:1986:WCT,
author = "Domenico Ferrari",
title = "Workload characterization (tutorial): issues and
approaches",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "1--1",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Workload characterization is that branch of
performance evaluation which concerns itself with the
measurement and modeling of the workloads to be
processed by the system being evaluated. Since all
performance indices of interest are workload-dependent,
there is no evaluation study that does not require the
characterization of one or more workloads. In spite of
the importance of the problem, our knowledge in this
area leaves much to be desired. The tutorial addresses
the main issues, both resolved and unresolved, in the
field, and surveys the major approaches that have been
proposed and are in use. Modern methods for designing
executable artificial workloads, as well as the
applications of these techniques in system procurement,
system tuning, and capacity planning are emphasized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goel:1986:SRM,
author = "Amrit L. Goel",
title = "Software reliability modeling (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "2--2",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are a number of views as to what software
reliability is and how it should be quantified. Some
people believe that this measure should be binary in
nature so that an imperfect program would have zero
reliability while a perfect one would have a
reliability value of one. This view parallels that of
program proving whereby the program is either correct
or incorrect. Others, however, feel that software
reliability should be defined as the relative frequency
of the times that the program works as intended by the
user. This view is similar to that taken in testing
where a percentage of the successful runs is used as a
measure of program quality. According to the latter
viewpoint, software reliability is a probabilistic
measure and can be defined as follows: Let $F$ be a
class of faults, defined arbitrarily, and $T$ be a
measure of relevant time, the units of which are
dictated by the application at hand. Then the
reliability of the software package with respect to the
class of faults $F$ and with respect to the metric $T$,
is the probability that no fault of the class occurs
during the execution of the program for a prespecified
period of relevant time. A number of models have been
proposed during the past fifteen years to estimate
software reliability and several other performance
measures. These are based mainly on the failure history
of software and can be classified according to the
nature of the failure process studied as indicated
below. Times Between Failures Models: In this class of
models the process under study is the time between
failures. The most common approach is to assume that
the time between, say, the $ (i - 1)$ st and $i$ th
failures, follows a distribution whose parameters
depend on the number of faults remaining in the program
during this interval. Failure Count Models: The
interest of this class of models is in the number of
faults or failures in specified time intervals rather
than in times between failures. The failure counts are
assumed to follow a known stochastic process with a
time dependent discrete or continuous failure rate.
Fault Seeding Models: The basic approach in this class
of models is to ``seed'' a known number of faults in a
program which is assumed to have an unknown number of
indigenous faults. Input Domain Based Models: The basic
approach taken here is to generate a set of test cases
from an input distribution which is assumed to be
representative of the operational usage of the program.
Because of the difficulty in obtaining this
distribution, the input domain is partitioned into a
set of equivalence classes, each of which is usually
associated with a program path. In this tutorial we
discuss the key models from the above classes and the
related issues of parametric estimation, unification of
models, Bayesian interpretation, validation and
comparison of models, and determination of optimum
release time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hedlund:1986:PMI,
author = "Kye Hedlund",
title = "Performance modeling in integrated circuit design
(tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "2--2",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial is an introduction to performance
modeling in the design of integrated circuits (ICs). It
assumes no background in either electrical engineering
or VLSI design; all relevant concepts and terminology
will be introduced. The goal is to give an overview of
the role of performance modeling in IC design, the
current state of the art, central problems and research
challenges. First, the process of IC design will be
reviewed. Every design progresses through a series of
stages: concept, architecture, implementation and
realization. Each level of design manipulates different
abstractions and hence is concerned with different
measures of design quality. Some principal measures
are: speed, silicon area, power consumption and the
number of input/output connections. There are several
different major design paradigms such as gate array,
standard cell and custom design. Each results in
different tradeoffs between flexibility, ease of
implementation and design quality. This has a
fundamental impact on both the design process and the
resulting design. Performance considerations enter into
IC design at a variety of levels: device, circuit,
logic design and architecture. Each requires different
performance models, and the designer must make
tradeoffs that are qualitatively different at different
levels. Circuit level design requires fast and accurate
models of logic gate behavior. A circuit's speed,
silicon area and power consumption must be accurately
estimated. Each of these circuit characteristics can be
traded off against the others, and the designer may
adjust the tradeoff in order to tune the circuit to the
needs of a particular application. Accurate and
computationally fast models form the basis for the
tools that assist the designer in circuit optimization.
Tools exist that accurately predict circuit performance
and that automatically optimize circuits. Integrated
circuit design is a field still in its infancy. This,
coupled with the fact that the underlying technological
base has undergone rapid change in recent years, means
that performance modeling of IC design is still in its
formative stages. Some areas (e.g. device modeling) are
more mature and better understood than others (e.g.
architectural modeling). Research opportunities are
plentiful.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Artis:1986:ESP,
author = "H. Pat Artis",
title = "Expert systems for performance analysis (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "3--3",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A great portion of the formal practice called computer
performance evaluation is the application of rules of
thumb and proceduralized analysis of model results,
specific reports, and data elements based on the
experience and knowledge of the practitioner. Expert
systems provide a technique to support the analyst in
such mundane analyses and allow them to study more
complex problems that cannot easily be proceduralized.
Rather than replacing performance analysts expert
systems provide an opportunity to increase their
productivity. The tutorial focuses on a discussion of
the fundamental building blocks of expert systems:
vocabularies, rules, and policies. A familiar example
is used to illustrate using expert systems for analysis
of performance results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tripathi:1986:PIL,
author = "Satish K. Tripathi",
title = "Performance issues in local area networks (tutorial)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "3--3",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial addresses performance problems in Local
Area Networks (LAN). User level performance measures
are affected both by the software as well as
communication bottlenecks. Techniques for modeling the
key components of the performance of a LAN will be
presented. Models will be presented to discuss the
throughput and response time characteristics of LANs.
We also present some measurement data obtained from a
LAN performance experiment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stone:1986:FC,
author = "Harold S. Stone and Dominique Thi{\'e}baut",
title = "Footprints in the cache",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "4--8",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops an analytical model for a
cache-reload transient. When an interrupt program or
system program runs periodically in a cache-based
computer, a short cache-reload transient occurs each
time the interrupt program is invoked. That transient
depends on the size of the cache, the fraction of the
cache used by the interrupt program, and the fraction
of the cache used by background programs that run
between interrupts. We call the portion of a cache used
by a program its footprint in the cache, and we show
that the reload transient is related to the area in the
tail of a normal distribution whose mean is a function
of the footprints of the programs that compete for the
cache. We believe that the model may be useful as well
for predicting paging behavior in virtual-memory
systems with round-robin scheduling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vernon:1986:PAM,
author = "Mary K. Vernon and Mark A. Holliday",
title = "Performance analysis of multiprocessor cache
consistency protocols using generalized timed {Petri}
nets",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "9--17",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We use an exact analytical technique, based on
Generalized Timed Petri Nets (GTPNs), to study the
performance of shared bus cache consistency protocols
for multiprocessors. We develop a general framework
within which the key characteristics of the Write-Once
protocol and four enhancements that have been combined
in various ways in the literature can be identified and
evaluated. We then quantitatively assess the
performance gains for each of the four enhancements. We
consider three levels of data sharing in our workload
models. One of the enhancements substantially improves
system performance in all cases. Two enhancements are
shown to have negligible effect over the range of
workloads analyzed. The fourth enhancement shows a
small improvement for low levels of sharing, but shows
more substantial improvement as sharing is increased,
if we assume a ``good access pattern''. The effects of
two architectural parameters, the blocksize and the
main memory cycle time are also considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harrison:1986:PMP,
author = "P. G. Harrison and A. J. Field",
title = "Performance modelling of parallel computer
architectures",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "18--27",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe two types of complex server
aggregations which can be used to model collections of
components in certain types of parallel computer
systems and give a case study showing how the
aggregations may be applied in practice. Analytical
models of such systems are becoming increasingly
important as a means of guiding the often complex
design processes, particularly since recent
developments in VLSI technology now make it possible to
fabricate many paper-designs hitherto impractical for
reasons of cost. We argue that aggregations of the type
described are essential in the modelling of parallel
systems; using the proposed techniques, large numbers
of components can be modelled as queue-length-dependent
servers within a queueing network in which the number
of servers is the same as the number of distinct types
of processing element in the system being modelled.
Because the number of servers in the model is fixed, i.e.
is independent of the number of processors, very large
multiprocessor systems can be modelled efficiently with
no explosion in the size of the state space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Madnick:1986:MMC,
author = "Stuart Madnick and Y. Richard Wang",
title = "Modeling multiprocessor computer systems with
unbalanced flows",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "28--34",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A performance analysis methodology using certain
aspects of queueing theory to evaluate computer system
speed performance is presented. This methodology
specifically focuses on modeling multiprocessor
computer systems with unbalanced flows (i.e., number of
transactions leaving a server is not the same as number
of transactions entering that server) due to
asynchronously spawned parallel tasks. This unbalanced
flow phenomenon, which has a significant effect on
performance, cannot be solved analytically by classical
queueing network models. A decomposition method is
applied to decompose the unbalanced flows. Formulae for
open queueing networks with unbalanced flows due to
asynchronously spawned tasks are developed.
Furthermore, an algorithm based on Buzen's convolution
algorithm is developed to test the necessary and
sufficient condition for closed system stability as
well as to compute performance measures. An average of
less than four iterations is reported for convergence
with this algorithm. A Study of the INFOPLEX
multiprocessor data storage hierarchy, comparing this
rapid solution algorithm with simulations, has shown
highly consistent results. A cost effective software
tool, using this methodology, has been developed to
analyze an architectural design, such as INFOPLEX, and
to produce measures such as throughput, utilization,
and response time so that potential performance
problems can be identified.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kleeman:1986:APB,
author = "Lindsay Kleeman and Antonio Cantoni",
title = "The analysis and performance of batching arbiters",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "35--43",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class of arbiters, known as batching arbiters, is
introduced and defined. A particularly simple
decentralised example of a batching arbiter is
described, with motivation given for the batching
arbiter model adopted. It is shown that under
reasonable assumptions, batching arbiters can be
described by a finite state Markov chain. The key steps
in the analysis of the arbiter performance are the
method of assigning states, evaluation of state
transition probabilities and showing that the Markov
chain is irreducible. Arbiter performance parameters
are defined, such as proportion of time allocated to
each requester and mean waiting time for each
requester. Apart from results describing the steady
state behavior of the arbiter for general system
parameters, a number of limiting results are also
obtained corresponding to light and heavy request
loading. Finally, numerical results of practical
interest are presented, showing the performance
parameters of the arbiter versus request rates for
various configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lehoczky:1986:PRT,
author = "John P. Lehoczky and Lui Sha",
title = "Performance of real-time bus scheduling algorithms",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "44--53",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317538",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When periodic tasks with hard deadlines communicate
over a bus, the problem of hard real-time bus
scheduling arises. This paper addresses several
problems of hard real-time bus scheduling, including
the evaluation of scheduling algorithms and the issues
of message packet pacing, preemption, priority
granularity and buffering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leland:1986:LBH,
author = "Will Leland and Teunis J. Ott",
title = "Load-balancing heuristics and process behavior",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "54--69",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317539",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic load balancing in a system of loosely-coupled
homogeneous processors may employ both judicious
initial placement of processes and migration of
existing processes to processors with fewer resident
processes. In order to predict the possible benefits of
these dynamic assignment techniques, we analyzed the
behavior (CPU, disk, and memory use) of 9.5 million
Unix* processes during normal use. The observed process
behavior was then used to drive simulation studies of
particular dynamic assignment heuristics.\par
Let $ F(\cdot) $ be the probability distribution of the
amount of CPU time used by an arbitrary process. In the
environment studied we found:\par
$ \bullet $ $ (1 - F(x)) \approx r x^{-c}, $1.05 < c <
1.25;\par
$ \bullet $ $ F(\cdot) $ is far enough from exponential
to make exponential models of little use.\par
$ \bullet $ With a foreground-background process
scheduling policy in each processor, simple heuristics
for initial placement and processor migration can
significantly improve the response ratios of processes
that demand exceptional amounts of CPU, without harming
the response ratios of ordinary processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1986:CPB,
author = "Kyoo Jeong Lee and Don Towsley",
title = "A comparison of priority-based decentralized load
balancing policies",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "70--77",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317540",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load balancing policies in distributed systems divide
jobs into two classes; those processed at their site of
origination (local jobs) and those processed at some
other site in the system after being transferred
through a communication network (remote jobs). This
paper considers a class of decentralized load balancing
policies that use a threshold on the local job queue
length at each host in making decisions for remote
processing. They differ from each other according to
how they assign priorities to each of these job
classes, ranging from one providing favorable treatment
to local jobs to one providing favorable treatment to
remote jobs. Under each policy, the optimal load
balancing problem is formulated as an optimization
problem with respect to the threshold parameter. The
optimal threshold is obtained numerically using
matrix-geometric formulation and an iteration method.
Last, we consider the effects that the job arrival
process can have on performance. One expects that load
balancing for systems operating in an environment of
bursty job arrivals should be more beneficial than for
an environment with random job arrivals. This fact is
observed through numerical examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{LeBoudec:1986:BEM,
author = "Jean-Yves {Le Boudec}",
title = "A {BCMP} extension to multiserver stations with
concurrent classes of customers",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "78--91",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a multiclass service station with $B$
identical exponential servers, with constant service
rate $ \mu $. At a station, the classes of customers
are sorted into $M$ concurrent groups; the discipline
of service is on a first come first served basis, but
two customers of the same group cannot be served
simultaneously. We show that product form is maintained
when such stations are inserted in BCMP networks, and
give closed form expressions for the steady-state
probabilities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Conway:1986:EAS,
author = "A. E. Conway and N. D. Georganas",
title = "An efficient algorithm for semi-homogeneous queueing
network models",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "92--99",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317542",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The class of product-form semi-homogeneous queueing
networks is introduced as a generalization of the class
of homogeneous networks, which has been considered by
Balbo et al for the performance modeling of local area
networks. In semi-homogeneous networks, the relative
traffic intensity at the various shared resources may
depend on the routing chain to which a customer
belongs. We develop an efficient algorithm for the
exact analysis of this class of networks. It is based
on the equations which form the foundation of RECAL, a
general purpose exact algorithm for multiple-chain
closed queueing networks. The complexity of the
algorithm is shown to be of order less than exponential
in $ (P - 1)^{1 / 2} $, where $P$ is the number of
processors (workstations) in the network. It is
therefore, in general, more efficient than a direct
application of either convolution, MVA or RECAL to the
class of semi-homogeneous queueing networks. The
algorithm presented here may be situated between the
algorithms of Balbo et al and the general purpose
algorithms, both in terms of its generality and
efficiency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nain:1986:OMH,
author = "Philippe Nain and Keith Ross",
title = "Optimal multiplexing of heterogeneous traffic with
hard constraint",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "100--108",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317543",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Considered are optimal dynamic policies for
multiplexing $ K + 1 $ heterogeneous traffic types onto
a single communication channel. The packet types arrive
to the channel according to independent Poisson
processes. The service requirements are exponential
with type dependent means. The optimization criterion
is to minimize a linear combination of the average
delays for packet types 1 to $K$, while simultaneously
subjecting the average delay of type-0 packets to a
hard constraint. The optimal multiplexing policy is
shown to be a randomized modification of the ``$ \mu c$
rule''. The optimization problem is thereby reduced to
a problem of finding the optimal randomization factor;
an algorithm, which can be implemented in real time, is
given to do this for two particular cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sevcik:1986:CTP,
author = "Kenneth Sevcik and Marjory J. Johnson",
title = "Cycle time properties of the {FDDI} token ring
protocol (extended abstract)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "109--110",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317544",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Communication technology now makes it possible to
support high data transmission rates at relatively low
cost. In particular, optical fiber can be used as the
medium in local area networks with data rates in the
range of 100 megabits per second. Unfortunately, local
area network topologies and communication protocols
that work well with lower speed media are not
necessarily appropriate when the data transmission rate
is scaled up by approximately an order of magnitude.
                 Recognizing this fact, an ANSI sub-committee (ANSI X3T9)
has been working for the past two years on a proposed
standard for a token ring protocol tailored to a
transmission medium with transmission rate in the 100
megabits per second range. The protocol is referred to
as the FDDI (Fiber Distributed Data Interface) Token
Ring protocol. The proposal for the standard is now
quite mature and nearly stable. While numerous analyses
of the performance of token ring protocols have been
carried out and described in the literature, these have
for the most part dealt with protocol variations of
less complexity than FDDI. The major feature that
distinguishes FDDI from token ring protocols that have
been analyzed previously is the concept of a ``timed
token'', which selectively allocates the right to
transmit data among the stations depending in part on
how rapidly the token progressed around the ring on the
previous cycle. A station is allowed to transmit
certain types of data only if the token's last cycle
has been shorter than a ``target'' token rotation time.
This feature makes it possible to give guaranteed
response to time-critical messages. The ``timed token''
creates some dependencies among transmissions at
various stations, however, and these dependencies
complicate the analysis of the protocol's performance.
The basic ideas of the timed token protocol on which
the FDDI protocol is based were first presented by Grow
[``A Timed-Token Protocol for Local Area Networks'',
                 Electro '82, 1982]. He distinguished two types of
traffic. Synchronous traffic is a type of traffic that
has delivery time constraints. Examples include voice
and video transmissions, where delays in transmission
can result in disruptions of the sound or picture
signal. Asynchronous traffic has no such time
constraints, or at least the time constraints are
measured in units that are large relative to the token
cycle time. Here is a brief overview of how the ``timed
token'' protocol works. The stations on the local area
network choose, in a distributed fashion, a target
token rotation time (TTRT). Basically, the TTRT is
chosen to be sufficiently small that requirements for
responsiveness at every station will be met. The right
to use network bandwidth for transmission of
synchronous traffic is allocated among the stations in
a manner that guarantees that network capacity is not
exceeded. The token is then forced by the protocol to
circulate with sufficient speed that all stations
receive their allocated fractions of capacity for
synchronous traffic. This is done by conditioning the
right to transmit asynchronous messages on the fact
that the token has rotated sufficiently fast that it is
``ahead of schedule'' in delivering synchronous
allocations to the stations. In essence, the TTRT value
dictates a departure schedule for the token to pass
from station to station, and asynchronous traffic can
be transmitted only when doing so does not cause that
                 schedule to be broken. Subsequently, Ulm [``A Timed Token
Ring Local Area Network and Its Performance
Characteristics'', Proc. of Conf. on Local Area
Networks, IEEE, 1982] analyzed the protocol described
by Grow and determined its sensitivity to various
parameters. He considered the effect of overheads and
provided a number of graphs indicating the impact of
various parameters on maximum transmission capacity. As
well as describing the timed token protocol, Grow and
Ulm included intuitive arguments supporting two
fundamental properties of (a somewhat idealized version
of) the protocol. These two properties are: The average
token cycle time in the absence of failures is at most
the TTRT. The maximum token cycle time in the absence
of failures is at most twice the TTRT. Both these
properties are important to the successful operation of
the protocol. The first one guarantees that the average
long run bandwidth provided to each station is at least
its allocated fraction of the network's capacity. The
second property guarantees that, in the absence of
component failures, the time between a station's
successive opportunities to transmit synchronous
traffic will never exceed twice the target token
rotation time. While Grow and Ulm assert that these
properties hold for the timed-token protocol, neither
formal proofs nor references are provided. Because the
FDDI protocol is based on the same timed-token
protocol, subsequent publications specifically
describing the FDDI protocol have also claimed that the
two properties hold. In this paper, we prove both
properties using a common notational framework. We
first treat an idealized situation in which several
types of overhead are ignored. We actually study a
                 protocol that is slightly more liberal than the FDDI
proposed standard in that it allows asynchronous
transmission more often because ``lateness'' is not
carried forward from cycle to cycle. The protocol
variation, which still guarantees properties (1) and
(2), is at least as easily implemented as the original
version. Also, it guarantees sufficient responsiveness
and capacity for the transmission of synchronous
traffic, while providing improved responsiveness to
asynchronous transmissions. When overheads are
considered, it is found that the proposed standard FDDI
protocol satisfies the constraint on average token
rotation time (relying on the retention of ``lateness''
from cycle to cycle), but not the one on maximum cycle
time. We analyze a variation of the protocol that
ignores accumulated lateness, but accounts for the
various overhead sources. The advantages of the new
rule include: It guarantees both desired properties
without having to retain ``lateness'' from one cycle to
the next. It provides better service to asynchronous
requests in the case where the amount of overhead is
                 small relative to the token rotation time. (When the
amount of overhead is large, the original proposed
protocol may have token rotation times significantly in
excess of twice the TTRT.) It is easier to implement.
Work is underway on the task of quantifying the
performance of the FDDI protocol by determining
estimates of, or tighter bounds on, the average token
rotation time and on the average delivery time of a
submitted message. The properties established in this
paper are required to form the basis of the
quantitative analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dallery:1986:ADP,
author = "Yves Dallery and Rajan Suri",
title = "Approximate disaggregation and performance bounds for
queueing networks with multiple-server stations",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "111--128",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317545",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce the concept of approximate disaggregation
which enables us to replace a station by a subnetwork,
i.e. a set of stations, such that the performance of
the derived network is close to the performance of the
initial network. We use this concept to disaggregate
                 any multiple-server station into a set of single-server
stations. Using two different disaggregations, we are
able to bound the performance of the initial network by
the performance of a ``lower'' and an ``upper'' network
each consisting of single-server stations, whose
performance can in turn be bounded by the Balanced Job
Bounds (or other known bounds). Several examples show
the useful information provided by these bounds at a
very low cost: for $K$ stations and $N$ customers, the
computational complexity here is $ \Omega (K)$ which is
significantly less than the $ \Omega (K N^2)$
operations required for exact solution. Indeed, despite
the multiple server stations, the computational
complexity of our bounds is the same as that of
Balanced Job Bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate disaggregation; closed queueing networks;
performance bounds; product form networks",
}
@Article{Strelen:1986:GMV,
author = "Johann Strelen",
title = "A generalization of mean value analysis to higher
moments: moment analysis",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "129--140",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317546",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Closed product-form queueing networks are considered.
Recursive schemata are proposed for the higher moments
of the number of customers in the queues, called
``moment analysis''. As with mean value analysis (MVA),
in general no state probabilities are needed.
Approximation techniques for these schemata similar to
those existing for MVA are introduced.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Massey:1986:PAD,
author = "William A. Massey",
title = "A probabilistic analysis of a database system",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "141--146",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317547",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In Gray, Homan, Obermarck, and Korth [GHOK], the
authors give many conjectures based on simulation for
the probabilistic analysis of transaction lock-waits
and transaction deadlocks. In this paper, we introduce
a probabilistic model to explain their observations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Witkowski:1986:PEM,
author = "Andrew Witkowski",
title = "Performance evaluation of multiversion with the
{Oracle} synchronization",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "147--158",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317548",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a new analytical model for
performance measurements of timestamp driven databases.
The model is based on two-dimensional Poisson processes
where one coordinate represents the real arrival time
                 and the other the timestamp of an arriving message.
The notion of preemption is defined which serves as a
model for synchronization. Preemption naturally implies
such performance measures as response time and amount
of abortion in the system. The concept of oracle is
introduced which allows evaluation of a lower bound on
the synchronization cost. Preemption and the oracle are
then used to evaluate performance of the Multiversion
synchronization. We present the distribution and the
expectation of the synchronization cost. The analysis
is then applied to a database with exponential
communication delays ($ \alpha $) and the intensity of
transaction $ \lambda $. It is shown that for
                 Multiversion, this cost depends linearly on $ 1 /
\alpha $ and logarithmically on $ \lambda $.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Swinghal:1986:PAA,
author = "Mukesh Swinghal and A. K. Agrawala",
title = "Performance analysis of an algorithm for concurrency
control in replicated database systems",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "159--169",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317549",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we analyze the performance of a
concurrency control algorithm for replicated database
systems. We present a model of a distributed database
system which provides a framework to study the
performance of different concurrency control
algorithms. We discuss performance criteria to evaluate
different algorithms. We use the model to analyze the
performance of an algorithm for concurrency control in
replicated database systems. The technique used in
analysis is iterative and approximate. We plot a set of
performance measures for several values of the model
parameters. The results of analysis are compared
against a simulation study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate solutions; error analysis; mean value
analysis; moment analysis; multiclass queueing
networks; product-form solutions",
}
@Article{Haikala:1986:AMP,
author = "Ilkka Haikala",
title = "{ARMA} models of program behaviour",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "170--179",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In models of virtual memory computer systems, it is
generally assumed that the time intervals between the
page (or segment) faults, often called lifetimes, are
independent from each other. Due to the
phase-transition behaviour in many real programs this
is not always true, and strong correlations may exist
between successive lifetimes. These correlations may
have a notable effect on the system behaviour. This
paper describes a series of experiments where
autoregressive-moving average (ARMA) models are used to
describe the correlation structure in sequences of
lifetimes. It is shown that many real program
executions can be described with models having four
parameters only, i.e. with the ARMA(1,1) models. The
models can be used as parts of simulation models for
instance, and they also give us better understanding
about the program behaviour in general.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Majumdar:1986:MAL,
author = "Shikharesh Majumdar and Richard B. Bunt",
title = "Measurement and analysis of locality phases in file
referencing behaviour",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "180--192",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317499.317551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent research has demonstrated the existence of
locality in short-term file referencing behaviour. A
detailed study of the dynamic characteristics of file
referencing is presented in this paper. The concept of
Bounded Locality Intervals from the field of program
behaviour has been used to model the locality phases of
file referencing behaviour. The model is found to be
powerful both from a descriptive point of view and from
the perspective of understanding the performance
implications of locality properties of file referencing
behaviour on file system management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Razouk:1986:MOS,
author = "Rami R. Razouk and Terri Stewart and Michael Wilson",
title = "Measuring operating system performance on modern
micro-processors",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "193--202",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of micro-processors and commercial operating
systems in real-time applications demands a good
understanding of factors which influence software
performance. Advances in micro-processor design (e.g.
pipelining) make performance prediction based on
instruction cycle counts difficult. In addition, the
increasing complexity of operating systems raises
doubts about our ability to ensure that their
performance will meet system requirements. Performance
measurement is more important than ever. This paper
describes an ongoing project intended to use
performance measurements to characterize the
performance of real-time systems software. To date the
project has conducted extensive experiments on an
in-house operating system running on Intel's 286/10
micro-computer in order to test the feasibility of
accurate and repeatable measurement of O/S performance.
The measurement approach, which views the software from
a resource-consumption standpoint, can be applied to
both O/S and application level software. Some of the
measurement results are presented here and are used to
test the manufacturer's assumptions about the
hardware's performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicola:1986:QAF,
author = "Victor F. Nicola and V. G. Kulkarni and Kishor S.
Trivedi",
title = "Queueing analysis of fault-tolerant computer systems
(extended abstract)",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "203--203",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing models provide a useful tool for predicting
the performance of many service systems including
computer systems, telecommunication systems,
computer/communication networks and flexible
manufacturing systems. Traditional queueing models
predict system performance under the assumption that
all service facilities provide failure-free service. It
must, however, be acknowledged that service facilities
do experience failures and that they get repaired. In
recent years, it has been increasingly recognized that
this separation of performance and
reliability/availability models is no longer adequate.
An exact steady-state queueing analysis of such systems
is considered by several authors and is carried out by
means of generating functions, supplementary variables,
embedded Markov process and renewal theory, or
probabilistic techniques [1,2,7,8]. Another approach is
approximate, in which it is assumed that the time to
reach the steady-state is much smaller than the times
to failures/repairs. Therefore, it is reasonable to
associate a performance measure (reward) with each
state of the underlying Markov (or semi-Markov) model
describing the failure/repair behavior of the system.
Each of these performance measures is obtained from the
steady-state queueing analysis of the system in the
corresponding state [3,5]. Earlier we have developed
models to derive the distribution of job completion
time in a failure-prone environment [3,4]. In these
models, we need to consider a possible loss of work due
to the occurrence of a failure, i.e., the interrupted
job may be resumed or restarted upon service
resumption. Note that the job completion time analysis
includes the delays due to failures and repairs. The
purpose of this paper [9] is to extend our earlier
analysis so as to account for the queueing delays. In
effect, we consider an exact queueing analysis of
fault-tolerant systems in order to obtain the
steady-state distribution and the mean of the number of
jobs in the system. In particular, we study a system in
which jobs arrive in a Poisson fashion and are serviced
according to FCFS discipline. The service requirements
of the incoming jobs form a sequence of independent and
identically distributed random variables. The
failure/repair behaviour of the system is modelled by
an irreducible continuous-time Markov chain, which is
independent of the number of jobs in the system. Let
the state-space be $ \{ 1, 2, \ldots {}, n \} $. When
the computer system is in state $i$ it delivers service
at rate $ r_i \geq 0$. Furthermore, depending on the
type of the state, the work done on the job is
preserved or lost upon entering that state. The actual
time required to complete a job depends in a complex
way upon the service requirement of the job and the
evolution of the state of the system. Note that even
though the service requirements of jobs are independent
and identically distributed, the actual times required
to complete these jobs are neither independent nor
identically distributed, and hence the model cannot be
reduced to a standard M/G/1 queue [8]. As loss of work
due to failures and interruptions is quite a common
phenomenon in fault-tolerant computer systems, the
model proposed here is of obvious interest. Using our
earlier results on the distribution of job completion
time we set up a queueing model and show that it has
the block M/G/1 structure. Queueing models with such a
structure have been studied by Neuts, Lucantoni and
others [6]. We demonstrate the usefulness of our
approach by performing the numerical analysis for a
system with two processors subject to failures and
repairs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:1986:ACQ,
author = "E. G. {Coffman, Jr.} and E. Gelenbe and E. N.
Gilbert",
title = "Analysis of a conveyor queue in a flexible
manufacturing system",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "204--223",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a flexible manufacturing system stations are
arranged along a common conveyor that brings items for
processing to the stations and also carries away the
processed items. At each station specialized robots
automatically load and unload items on and off the
conveyor. We examine here a single station in such a
system. A new kind of queueing problem arises, with
input-output dependencies that result because the same
conveyor transports items both to and from the station.
The paper analyzes two models of a station. Model 1 has
one robot that cannot return a processed item to the
conveyor while unloading a new item for processing.
Model 2 has two robots to allow simultaneous loading
and unloading of the conveyor. A principal goal of the
analysis is the proper choice of the distance
separating the two points at which items leave and
rejoin the conveyor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kouvatsos:1986:MEQ,
author = "Demetres D. Kouvatsos",
title = "A maximum entropy queue length distribution for the
{G/G/1} finite capacity queue",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "224--236",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317555",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new ``hybrid'' analytic framework, based on the
principle of maximum entropy, is used to approximate
the queue length distribution of a G/G/1 finite buffer
queue. Robust recursive relations are derived and
asymptotic connections to the infinite capacity queue
are established. Furthermore, ``equivalence''
principles are applied to analyse two-stage cyclic
queues with general service times and favourable
comparisons with global balance solutions are made.
Numerical examples provide useful information on how
critically system behaviour is affected by the
distributional form of interarrival and service
patterns. It is shown that the maximum entropy solution
predicts the bottleneck ``anomaly'' and also it defines
bounds on system performance. Comments on the
implication of the work to the analysis and aggregation
of computer systems are included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Takagi:1986:QAN,
author = "Hideaki Takagi and Masayuki Murata",
title = "Queueing analysis of nonpreemptive reservation
priority discipline",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "237--244",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Analysis is given to a nonpreemptive priority queueing
system with $P$ classes of messages where the class of
message to be served next is the highest priority class
waiting at the time of service start. (If this were the
highest priority class waiting at the service
completion epoch, we would have a classical
nonpreemptive head-of-line priority queueing system.)
We assume that the message service time distribution is
identical for all classes. The mean message waiting
time is obtained explicitly for each class, and
numerically compared to the values in the corresponding
head-of-line system. We have also proposed and
evaluated a fairness measure to demonstrate the degree
of discrimination. This model can be applied to the
performance analysis of the prioritized token-ring
scheme in local area computer networks when the
propagation delay and bit latency are negligible
compared to the frame transmission time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hofri:1986:QSP,
author = "Micha Hofri",
title = "Queueing systems with a procrastinating server",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "245--253",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two related problems are analyzed and discussed: A
queueing system that differs from the standard M/G/1
only in that at the end of a busy-period the server
takes a sequence of vacations, inspecting the state of
the queue at the end of each. When the length of the
queue exceeds a predetermined level $m$ it returns to
serve the queue exhaustively. Two queues, with Poisson
arrivals and general service-time distributions are
attended by a single server. When the server is
positioned at a certain queue it will serve the latter
exhaustively, and at busy-period end will only switch
to the other if the queue length there exceeds in size
                 a predetermined threshold $ m_i $. The treatment combines
analytic and numerical methods. Only steady-state
results are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boxma:1986:WTA,
author = "O. J. Boxma and B. Meister",
title = "Waiting-time approximations for cyclic-service systems
with switch-over times",
journal = j-SIGMETRICS,
volume = "14",
number = "1",
pages = "254--262",
month = may,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/317531.317558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:02:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mean waiting-time approximations are derived for a
single-server multi-queue system with nonexhaustive
cyclic service. Non-zero switch-over times of the
server between consecutive queues are assumed. The main
tool used in the derivation is a pseudo-conservation
law recently found by Watson. The approximation is
simpler and, as extensive simulations show, more
accurate than existing approximations. Moreover, it
gives very good insight into the qualitative behavior
of cyclic-service queueing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hu:1986:MFA,
author = "Irene Hu",
title = "Measuring file access patterns in {UNIX}",
journal = j-SIGMETRICS,
volume = "14",
number = "2",
pages = "15--20",
month = aug,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/15827.15828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:16 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "UNIX is a disk-based operating system, where only the
system kernel is always memory-resident. A combination
of small block size, limited read-ahead and numerous
seeks can severely limit the file system throughput.
This paper presents a tool to study the file access
patterns. Information derived from the data collected
can be used to determine the optimal disk block size
and also to improve the block placement strategy. The
tool is a software monitor, installed at the device
driver level, and triggered by every physical request
to the disk handler. The design approach used to
measure the average number of logical records accessed
sequentially is described. An evaluation of the tool is
also presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ferrari:1986:CIP,
author = "Domenico Ferrari",
title = "Considerations on the insularity of performance
evaluation",
journal = j-SIGMETRICS,
volume = "14",
number = "2",
pages = "21--32",
month = aug,
year = "1986",
CODEN = "????",
DOI = "https://doi.org/10.1145/15827.15829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:16 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The author argues that systems performance evaluation,
in the first twenty years of its existence, has
developed in substantial isolation with respect to such
disciplines as computer architecture, system
organization, operating systems, and software
engineering. The possible causes for this phenomenon,
which seems to be unique in the history of engineering,
are explored. Its positive and negative effects on
computer science and technology, as well as on
performance evaluation itself, are discussed. In the
author's opinion, the drawbacks of isolated development
outweigh its advantages. Thus, the author proposes
instructional and research initiatives to foster the
rapid integration of the performance evaluation
viewpoint into the main stream of computer science and
engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tripathi:1987:RWD,
author = "Satish K. Tripathi and Steve Kaisler and Sharat
Chandran and Ashok K. Agrawala",
title = "Report on the {Workshop on Design \& Performance
Issues in Parallel Architectures}",
journal = j-SIGMETRICS,
volume = "14",
number = "3--4",
pages = "16--32",
month = jan,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/25286.25287",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:20 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Machines that perform computations in parallel have
come into vogue today partly prodded by technology and
user needs. In the early spring of '86, a workshop was
held under the auspices of the University of Maryland
Institute for Advanced Computer Studies (UMIACS) to
investigate the design and the not-usually-addressed
issue of the performance of these machines. This report
serves as a record of the workshop though it does not
promise to be a transcript of the various sessions.
About a dozen presentations interspersed with spirited
open-forum discussions have been paraphrased here. It
is hoped that this report remains faithful to the
proceedings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gray:1987:VDS,
author = "Jim Gray",
title = "A view of database system performance measures",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "3--4",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29905",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Database systems allow quick creation of performance
problems. The goal of database systems is to allow the
computer-illiterate to write complex and complete
applications. It is the job of the system to translate
a high-level description of data and procedures into
efficient algorithms. The REAL performance metric of a
system is how successfully it meets these goals.
Practitioners use a much narrower definition of system
performance. They assume a standard workload and
measure performance by peak throughput and by dollar
cost per transaction. Although many vendors have
``private'' performance measures, Bitton, Dewitt, and
Turbyfill were the first to publish a measure of
database system performance [Bitton]. Their measure,
here called the Wisconsin benchmark, consists of a
database design, a set of 32 retrieval and update
statements, and a script for multi-user tests. They
give two performance metrics: the elapsed time for each
statement and the throughput of the system when running
sixteen simultaneous scripts. No response time
requirement or cost measure is included in the
definition. The Wisconsin benchmark is the most widely
used database benchmark. Largely in response to the
Wisconsin benchmark, an informal group including Bitton
and Dewitt, defined a benchmark more representative of
transaction processing applications [Anon]. Its
workload is: SCAN --- A mini-batch operation to
sequentially copy 1000 records SORT --- A batch
operation to sort one million records. DebitCredit ---
A short transaction with terminal input and output via
X.25, presentation services, and a mix of five database
accesses. The DebitCredit transaction has rules for
scaling the terminal network and database size as the
transaction rate increases, and also rules for
distributing transactions if the system is
decentralized. The performance metrics for this
benchmark are: Elapsed time for the SCAN and SORT. Peak
throughput for the DebitCredit transaction at 1 second
response time for 95\% of the transactions. This gives
a TPS (Transactions Per Second) rating. Price per
transaction where price is the 5-year cost of hardware,
software and maintenance. This is sometimes called the
vendors-view of price. This benchmark has been adopted
by several vendors to compare their performance and
price performance from release to release and also to
compare their performance to competitive products.
MIPS, Whetstones and MegaFLOPs have served a similar
role in the scientific community. A system's TPS rating
indicates not just processor speed, but also IO
architecture, operating system, data communications and
database software performance. Unfortunately, it does
not capture ease-of-use. Work continues on formalizing
these benchmarks. At present they are written in
English. Ultimately they should be defined by a file
generator and a set of programs written in a standard
database language such as COBOL-SQL. When a vendor
first measures his system against these benchmarks, the
results are usually terrible. Both benchmarks are
designed to expose generic performance bugs in
frequently used transaction processing atoms. For
example, the Wisconsin and SCAN benchmarks heavily
penalize a system which is slow to read the next record
in a file. A system with poor performance on these
benchmarks can be analyzed as follows: Most vendors
have an ``atomic'' model of their system which
represents each transaction as a collection of atoms.
The atoms are the primitives of the system. For
example, the SCAN benchmark is represented by most
vendors as: SCAN: BEGIN TRANSACTION PERFORM 1000 TIMES
READ SEQUENTIAL INSERT SEQUENTIAL COMMIT TRANSACTION
The atomic weights for, BEGIN, READ SEQUENTIAL, INSERT
SEQUENTIAL, and COMMIT are measured for each release.
The atomic weight usually consists of CPU instructions,
message bytes, and disc IOs for a ``typical'' call to
that operation. These weights can be converted to
service times by knowing the speeds and utilizations of
the devices (processors, discs, lines) used for the
application. The molecular weight and service time of
SCAN can then be computed as the sum of the atomic
weights. Defining and measuring a system's atoms is
valuable. It produces a simple conceptual model of how
the system is used. Atomic measurements also expose
performance bugs. For example, based on the SCAN
benchmark, most systems perform READ SEQUENTIAL in 1000
instructions and with 0.02 disc IO. If a system uses
many more instructions or many more IO then it has a
performance problem. Similarly, the DebitCredit
transaction typically consumes about 200Ki (thousand
instructions) and five disc IO per transaction. One
system is known to use 800Ki and 14 IO per transaction.
The vendor could use atomic measurement to find the
causes of such poor performance. When such problems are
localized to an atom, solutions to the problem readily
suggest themselves. So, atomic measurement is useful
for performance assurance and performance improvement.
Atomic measurement also has a major role in system
sizing and in capacity planning. If the customer can
describe his application in terms of atoms, then a
spreadsheet application can give him an estimate of the
CPU, disc and line cost for the application. With
substantially more effort (and assumptions) the
system's response time can be predicted. With even more
effort, a prototype system can be generated and
benchmarked from the atomic transaction descriptions.
Snapshot [Stewart] and Envision [Envison] are examples
of systems which combine atomic modeling, queue
modeling, and ultimately benchmarking of real systems
generated from the atomic description of the
application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heidelberger:1987:PCM,
author = "Philip Heidelberger and Seetha Lakshmi",
title = "A performance comparison of multi-micro and mainframe
database architectures",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "5--6",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Database machine architectures consisting of multiple
microprocessors or mini-computers are attracting wide
attention. There have been several proposals and
prototypes (see, e.g., DeWitt, Gerber, Graefe, Heytens,
Kumar and Muralikrishna (1986), Fishman, Lai and
Wilkinson (1984), Hsiao (1983), or the 1983 and 1985
Proceedings of the International Workshop on Database
Machines). There is also a commercially available
system based on multiple microprocessors (Teradata
(1984)). With these architectures it is possible to
exploit parallelism at three levels: within a single
query, within a single transaction, and by
simultaneously executing multiple independent
transactions. The rationale behind these multiple
microprocessor architectures is primarily to take
advantage of the potential lower cost per MIPS
(Millions of Instructions per Second, a measure of
processing power) of microprocessors as opposed to
mainframes. In addition, database machines may offer
incremental capacity growth as well as improved
performance for large queries by exploiting parallelism
within a single query. However, it is not clear if
database machines made of multiple microprocessors
indeed have any cost/performance advantage over a more
conventional mainframe based database management
systems. Several papers on the performance analysis of
database machines can be found in the literature (e.g.,
Salza, Terranova and Velardi (1983) or Bit and Hartman
(1985)). Most of these studies have focused on
determining the execution time of a single query in a
particular database machine architecture. Few studies
have dealt with the response time of single queries in
a multi-user environment. We are not aware of any
papers that systematically study the performance
trade-offs between a multi-microprocessor database
machine and a large mainframe system. This paper
presents such a systematic study. We examine a
hypothetical database machine that uses standard
microprocessors and disks; database machines that use
special purpose hardware are not considered here (e.g.,
Sakai, Kamiya, Iwata, Abe, Tanaka, Shibayama and
Murakami (1984)). However, we do not limit our studies
to the components available today; we also consider
processors and disks projected to be available in the
future. We assume that both the database machine and
the mainframe provide relational database functions
(e.g., Date (1986)). While there are several
applications for relational database (on-line
transaction processing, ad-hoc queries, etc.), we limit
our attention to one specific application domain;
namely high volume on-line transaction processing. In
this domain, we consider a range of transactions and
investigate the sensitivity of the two architectures to
various transaction related parameters. Dias, Iyer and
Yu (1986), in a similar study, have investigated the
issue of coupling many small systems to obtain
comparable performance of a few (coupled) large
systems. Their study is limited to a specific workload
with no parametric or sensitivity study with respect to
transaction characteristics and the architectures they
compared are quite different from the database machine
considered in this paper. For high volume transaction
processing environments, there appears to be only a
limited potential to exploit parallelism within a
single transaction. It is therefore expected that since
the database machine is made of slower processors and
since the functions are distributed across several
processors, it would require more aggregate processing
capacity, or MIPS, than the mainframe to sustain a
given throughput and a response time. Thus there is a
trade-off between the cheaper cost per MIPS of
microprocessors as opposed to mainframes and the
increase in aggregate MIPS required by the database
machine to achieve a given performance level. This
paper addresses this trade-off through the use of
queueing network performance models of the two
architectures. Assuming that the MIPS ratings of the
microprocessor and mainframe are equivalent, our models
indicate that with today's processor technology, the
performance of the database machine is sensitive to the
transaction complexity, the amount of skew in the data
access pattern, the amount of overhead required to
implement the distributed database function and the
buffer miss ratio. Furthermore, there is only a narrow
range of transaction processing workloads for which the
database machine can meet a prespecified response time
objective with only a moderate increase in aggregate
processing capacity over that of the mainframe.
However, using the technology projected for the early
1990's, our models predict that the performance of the
hypothetical database machine is less sensitive to the
above factors. Assuming that the level of lock
contention is low, the memory hierarchies of the two
architectures are equivalent (in the sense of achieving
equal buffer miss ratios), and the performance of disks
are equivalent in the two architectures, the models
predict that the performance objective can be met with
only a moderate increase in aggregate capacity for a
broader range of transaction workloads. The workloads
considered in this paper consist of relatively short
transactions based on primary key retrievals and
updates. It is therefore difficult to make general
conclusions about the overall superiority of one
architecture against the other when a mixed set of
workloads is expected (our study assumes that all
transactions have the same expected pathlength and I/O
activity). This study focused on performance issues and
specifically does not address such issues as MIPS
flexibility (general purpose versus special purpose
architectures), security, recovery and system
management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reed:1987:PRA,
author = "Daniel A. Reed and Chong-kwon Kim",
title = "Packet routing algorithms for integrated switching
networks",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "7--15",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Repeated studies have shown that a single switching
technique, either circuit or packet switching, cannot
optimally support a heterogeneous traffic mix composed
of voice, video and data. Integrated networks support
such heterogeneous traffic by combining circuit and
packet switching in a single network. To manage the
statistical variations of network traffic, we introduce
a new, adaptive routing algorithm called hybrid,
weighted routing. Simulations show that hybrid,
weighted routing is preferable to other adaptive
routing techniques for both packet switched networks
and integrated networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gonsalves:1987:PEV,
author = "Timothy A. Gonsalves and Fouad A. Tobagi",
title = "Performance of the {Expressnet} with voice\slash data
traffic",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "16--26",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the past few years, local area networks have come
into widespread use for the interconnection of
computers. Together with the trend towards digital
transmission in voice telephony, this has spurred
interest in integrated voice/data networks. The
Expressnet, an implicit-token round-robin scheme using
unidirectional busses, achieves high performance even
at bandwidths of 100 Mb/s. Other features that make the
protocol attractive for voice/data traffic are bounded
delays and priorities. The latter is achieved by
devoting alternate rounds to one or the other of the
two traffic types. By the use of accurate simulation,
the performance of the Expressnet with voice/data
traffic is characterized. It is shown that the
Expressnet satisfies the real-time constraints of voice
traffic adequately even at bandwidths of 100 Mb/s. Data
traffic is able to effectively utilize bandwidth unused
by voice traffic. The trade-offs in the alternating
round priority mechanism are quantified. Loss of voice
samples under overload is shown to occur regularly in
small, frequent clips, subjectively preferable to
irregular clips. In a comparison of the Expressnet, the
contention-based Ethernet and the round-robin Token Bus
protocols, the two round-robin protocols are found to
perform better than the Ethernet under heavy load owing
to the more deterministic mode of operation. The
comparison of the two round-robin protocols highlights
the importance of minimizing scheduling overhead at
high bandwidths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agrawal:1987:ARD,
author = "Subhash Agrawal and Ravi Ramaswamy",
title = "Analysis of the resequencing delay for {M/M/m}
systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "27--35",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many virtual circuit service communications networks
such as SNA employ virtual circuit transmission method
inside the subnet. An essential feature of such
networks is that the sequence in which messages are
transmitted is maintained throughout the route from
source node to the destination node. When there are
multiple links connecting two intermediate nodes in the
route and the messages are of different lengths, then
it is possible that the messages complete transmission
at the next node out of sequence. These messages then
have to be resequenced, i.e. put in the right order, in
order to provide a virtual circuit service. The
resequencing operation introduces an additional delay
in transmission which may be significant. In this paper
the probability distribution of the resequencing delay
is obtained for the M/M/m system. Simple expressions
for the mean and coefficient of variation of the
resequencing delay are also provided. It is shown
through a variety of numerical examples that the
resequencing delay is likely to be a significant
component of the overall response time. Some
interesting aspects of dependence of the mean
resequencing delay on system parameters are studied
analytically.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reed:1987:PDE,
author = "Daniel A. Reed and Allen D. Malony and Bradley D.
McCredie",
title = "Parallel discrete event simulation: a shared memory
approach",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "36--38",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bucher:1987:CLV,
author = "Ingrid Y. Bucher and Margaret L. Simmons",
title = "A close look at vector performance of
register-to-register vector computers and a new model",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "39--45",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Darema-Rogers:1987:MAP,
author = "F. Darema-Rogers and G. F. Pfister and K. So",
title = "Memory access patterns of parallel scientific
programs",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "46--58",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A parallel simulator, PSIMUL, has been used to collect
information on the memory access patterns and
synchronization overheads of several scientific
applications. The parallel simulation method we use is
very efficient and it allows us to simulate execution
of an entire application program, amounting to hundreds
of millions of instructions. We present our
measurements on the memory access characteristics of
these applications; particularly our observations on
shared and private data, their frequency of access and
locality. We have found that, even though the shared
data comprise the largest portion of the data in the
application program, on the average a small fraction of
the memory references are to shared data. The low
averages do not preclude bursts of traffic to shared
memory nor does it rule out positive benefits from
caching shared data. We also discuss issues of
synchronization overheads and their effect on
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Geist:1987:DSS,
author = "Robert Geist and Robert Reynolds and Eve Pittard",
title = "Disk scheduling in {System V}",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "59--68",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A variety of disk scheduling algorithms, including
some newly defined ones, are compared both in
simulation and in tests on a real machine running UNIX*
System V. In the real system tests, first-come
first-served (FCFS), shortest seek time first (SSTF),
and the standard System V algorithm (SVS) are all seen
to yield relatively poor mean waiting time performance
when compared to the VSCAN(0.2) algorithm and
modifications thereof suggested by Coffman.
Nevertheless, each is seen to excel along a particular
performance dimension. The adequacy of open, Poisson
arrival simulation models in predicting disk scheduling
performance is questioned, and an alternative arrival
model is suggested which offers improved predictions in
the System V environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Livny:1987:MDM,
author = "Miron Livny and Setrag Khoshafian and Haran Boral",
title = "Multi-disk management algorithms",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "69--77",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate two schemes for placing data on
multiple disks. We show that declustering (spreading
each file across several disks) is inherently better
than clustering (placing each file on a single disk)
due to a number of reasons including parallelism and
uniform load on all disks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buzen:1987:UOT,
author = "Jeffrey P. Buzen and Annie W. Shum",
title = "A unified operational treatment of {RPS} reconnect
delays",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "78--92",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Expressions are presented for RPS reconnect delays in
three basic cases: single path, multiple path with
static reconnect, multiple path with dynamic reconnect.
The assumption of homogeneous reconnects, which is
introduced in the analysis, is shown to be implicit in
many prior analyses. This assumption simplifies the
resulting equations, but more general equations are
also presented for the case where homogeneous
reconnects are not assumed. These general results have
not appeared previously. This paper also uses the
assumption of constrained independence to derive a
result for static reconnect which has only been derived
previously using the maximum entropy principle. In the
case of dynamic reconnect, constrained independence
yields an entirely new closed form result. In addition
to being a consistent extension of the static reconnect
case, this new result is the only closed form
expression for dynamic reconnect that yields a correct
solution in certain saturated cases. Constrained
independence can provide a useful alternative
assumption in many other cases where complete
independence is known to be only approximately
correct.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nelson:1987:PAP,
author = "R. Nelson and D. Towsley and A. N. Tantawi",
title = "Performance analysis of parallel processing systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "93--94",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A centralized parallel processing system with job
splitting is considered. In such a system, jobs wait in
a central queue, which is accessible by all the
processors, and are split into independent tasks that
can be executed on separate processors. This parallel
processing system is modeled as a bulk arrival $M^X/M/c$
queueing system where customers and bulks correspond to
tasks and jobs, respectively. Such a system has been
studied in [1, 3] and an expression for the mean
response time of a random customer is obtained.
However, since we are interested in the time that a job
spends in the system, including synchronization delay,
we must evaluate the bulk response time rather than
simply the customer response time. The job response
time is the sum of the job waiting time and the job
service time. By analyzing the bulk queueing system we
obtain an expression for the mean job waiting time. The
mean job service time is given by a set of recurrence
equations. To compare this system with other parallel
processing systems, the following four models are
considered: Distributed/Splitting (D/S), Distributed/No
Splitting (D/NS), Centralized/Splitting (C/S), and
Centralized/No Splitting (C/NS). In each of these
systems there are $c$ processors, jobs are assumed to
consist of set of tasks that are independent and have
exponentially distributed service requirements, and
arrivals of jobs are assumed to come from a Poisson
point source. The systems differ in the way jobs queue
for the processors and in the way jobs are scheduled on
the processors. The queueing of jobs for processors is
distributed if each processor has its own queue, and is
centralized if there is a common queue for all the
processors. The scheduling of jobs on the processors is
no splitting if the entire set of tasks composing that
job are scheduled to run sequentially on the same
processor once the job is scheduled. On the other hand,
the scheduling is splitting if the tasks of a job are
scheduled so that they can be run independently and
potentially in parallel on different processors. In the
splitting case a job is completed only when all of its
tasks have finished execution. In our study we compare
the mean response time of jobs in each of the systems
for differing values of the number of processors,
number of tasks per job, server utilization, and
certain overheads associated with splitting up a job.
The $M^X/M/c$ system studied in the first part of the
paper corresponds to the C/S system. In this system, as
processors become free they serve the first task in the
queue. D/. systems are studied in [2]. We use the
approximate analysis of the D/S system and the exact
analysis of the D/NS system that are given in that
paper. For systems with 32 processors or less, the
relative error in the approximation for the D/S system
was found to be less than 5 percent. In the D/NS
system, jobs are assigned to processors with equal
probabilities. The approximation we use for the mean
job response time for the C/NS system is found in [4].
Although an extensive error analysis for this system
over all parameter ranges has not been carried out, the
largest relative error for the $M/E_2/10$ system reported
in [4] is about 0.1 percent. For all values of
utilization, $\rho$, our results show that the splitting
systems yield lower mean job response time than the no
splitting systems. This follows from the fact that, in
the splitting case, work is distributed over all the
processors. For any $\rho$, the lowest (highest) mean job
response time is achieved by the C/S system (the D/NS
system). The relative performance of the D/S system and
the C/NS system depends on the value of $\rho$. For small
$\rho$, the parallelism achieved by splitting jobs into
parallel tasks in the D/S system reduces its mean job
response time as compared to the C/NS system, where
tasks of the same job are executed sequentially.
However, for high $\rho$, the C/NS system has lower mean
job response time than the D/S system. This is due to
the long synchronization delay incurred in the D/S
system at high utilizations. The effect of parallelism
on the performance of parallel processing systems is
studied by comparing the performance of the C/NS system
to that of the C/S system. The performance improvement
obtained by splitting jobs into tasks is found to
decrease with increasing utilization. For a fixed
number of processors and fixed $\rho$, we find that by
increasing the number of tasks per job, i.e. higher
parallelism, the mean job response time of the C/NS
system relative to that of the C/S system increases. By
considering an overhead delay associated with splitting
jobs into independent tasks, we observe that the mean
job response time is a convex function of the number of
tasks, and thus, for a given arrival rate, there exists
a unique optimum number of tasks per job. We also
consider problems associated with partitioning the
processors into two sets, each dedicated to one of two
classes of jobs: edit jobs and batch jobs. Edit jobs
are assumed to consist of simple operations that have
no inherent parallelism and thus consist of only one
task. Batch jobs, on the other hand, are assumed to be
inherently parallel and can be broken up into tasks.
All tasks from either class are assumed to have the
same service requirements. A number of interesting
phenomena are observed. For example, when half the jobs
are edit jobs, the mean job response time for both
classes of jobs increases if one processor is allocated
to edit jobs. Improvement to edit jobs, at a cost of
increasing the mean job response time of batch jobs,
results only when the number of processors allocated to
edit jobs is increased to two. This, and other results,
suggest that it is desirable for parallel processing
systems to have a controllable boundary for processor
partitioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:1987:RDR,
  author =       "Xiao-Nan Tan and Kenneth C. Sevcik",
  title =        "Reduced distance routing in single-stage
shuffle-exchange interconnection networks",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "95--110",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In multiprocessor architectures, it is frequently
necessary to provide parallel communication among a
potentially large number of processors and memories.
Among the many interconnection schemes that have been
proposed and analyzed, shuffle-exchange networks have
received much attention due to their ability to allow a
message to pass from any node to any other node in a
number of steps that grows only logarithmically with
the number of interconnected nodes (in the absence of
contention) while keeping the number of hardware
connections per node independent of the number of
nodes. Straight-forward use of shuffle-exchange
networks to interconnect $N$ nodes involves having
                 every packet pass through $ \log_2 N$ stages en route to
its destination. By exploiting common structure in the
addresses of the source and destination nodes, however,
more sophisticated routing can reduce the average
number of steps per message below $ \log_2 N$. In this
paper, we describe and evaluate three levels of
improvements to basic single-stage shuffle-exchange
routing. Each one yields successively more benefit at
the cost of more complexity. Using simulation, we show
that the use of routing schemes that reduce the average
distance can substantially reduce average message delay
times and increase interconnection network capacity. We
quantify the performance gains only in the case where
messages from one node are destined with uniform
probability over all nodes. However, it is clear that
the advantage of the new schemes we propose would be
still greater if there is some ``locality'' of
communication that can be exploited by having the most
frequent communication occur between pairs of nodes
with shorter distances separating them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bouras:1987:QDB,
author = "Christos Bouras and John Garofalakis",
title = "Queueing delays in buffered multistage interconnection
networks",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "111--121",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29918",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our work deals with the analysis of the queueing
delays of buffered multistage Banyan networks of
multiprocessors. We provide tight upper bounds on the
mean delays of the second stage and beyond, in the case
of infinite buffers. Our results are validated by
simulations performed on a network simulator
constructed by us. The analytic work for network stages
beyond the first, provides a partial answer to open
problems posed by previous research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garcia-Molina:1987:PTM,
author = "Hector Garcia-Molina and Lawrence R. Rogers",
title = "Performance through memory",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "122--131",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two of the most important parameters of a computer are
its processor speed and physical memory size. We study
the relationship between these two parameters by
experimentally evaluating the intrinsic memory and
processor requirements of various applications. We also
explore how hardware prices are changing the cost
effectiveness of these two resources. Our results
indicate that several important applications are
``memory-bound,'' i.e., can benefit more from increased
memory than from a faster processor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jipping:1987:PPC,
author = "Michael J. Jipping and Ray Ford",
title = "Predicting performance of concurrency control
designs",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "132--142",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance is a high-priority consideration when
designing concurrent or distributed systems. The
process of designing such a system is complicated by
two factors: (1) the current state-of-the-art in
concurrent system design is very ad hoc --- software
design principles for concurrent systems are still in
their infancy, and (2) performance evaluation of
concurrent systems is quite difficult and it is
especially difficult to relate aspects of the design to
aspects of the implementation. This paper reports on
work with a performance modeling technique for
concurrent or distributed systems that allows
structured design to be related to the implementation
of the concurrency control component of the system.
First, a General Process Model (GPM) is used to
organize system design information into a six level
hierarchy. The abstract performance properties of each
level in the hierarchy have been established using
concurrency control theory. Next, we describe how to
translate the structured system design into efficient
concurrency control techniques, using elements of this
theory. Finally, a prototype automated design
evaluation tool which serves as a central component of
the design methodology is described.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dahbura:1987:PAF,
author = "Anton T. Dahbura and Krishan K. Sabnani and William J.
Hery",
title = "Performance analysis of a fault detection scheme in
multiprocessor systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "143--154",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A technique is described for detecting and diagnosing
faults at the processor level in a multiprocessor
system. In this method, a process is assigned whenever
possible to two processors: the processor that it would
normally be assigned to (primary) and an additional
processor which would otherwise be idle (secondary).
Two strategies will be described and analyzed: one
which is preemptive and another which is
non-preemptive. It is shown that for moderately loaded
systems, a sufficient percentage of processes can be
performed redundantly using the system's spare capacity
to provide a basis for fault detection and diagnosis
with virtually no degradation of response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salsburg:1987:SAC,
author = "Michael A. Salsburg",
title = "A statistical approach to computer performance
modeling",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "155--162",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29922",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Models of discrete systems are often utilized to
assist in computer engineering and procurement. The
tools for modeling have been traditionally developed
using either analytic methods or discrete event
simulation. The research presented here explores the
use of statistical techniques to augment and assist
this basic set of tools.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kerola:1987:MPM,
author = "Teemu Kerola and Herb Schwetman",
title = "{Monit}: a performance monitoring tool for parallel
and pseudo-parallel programs",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "163--174",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a performance monitoring system,
Monit, developed for performance evaluation of parallel
systems. Monit uses trace files that are generated
during the execution of parallel programs. Monit
analyzes these trace files and produces time-oriented
graphs of resource usage and system queues. Users
interactively select the displayed items, resolution,
and time intervals of interest. The current
implementation of Monit is for SUN-3 workstation, but
the program is easily adaptable to other devices. We
also introduce a parallel programming environment, PPL,
                 implemented as a superset of C for the Sequent
Balance 8000 multi-processor system. Parallel programs
written in PPL can produce the trace files for Monit.
Monit is also integrated into a process-oriented
simulation language CSIM. CSIM allows the creation of
simulation models based on multiple processes competing
for resources. The similarity between parallel
processes in PPL and pseudo-parallel processes in CSIM
facilitates this combined use of Monit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marsan:1987:MSA,
author = "M. Ajmone Marsan and G. Balbo and G. Chiola and G.
Conte",
title = "Modeling the software architecture of a prototype
parallel machine",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "175--185",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A high-level Petri net model of the software
architecture of an experimental MIMD multiprocessor
system for Artificial Intelligence applications is
derived by direct translation of the code corresponding
to the assumed workload. Hardware architectural
constraints are then easily added, and formal reduction
rules are used to simplify the model, which is then
further approximated to obtain a performance model of
the system based on generalized stochastic Petri nets.
From the latter model it is possible to estimate the
optimal multiprogramming level of each processor so as
to achieve the maximum performance in terms of overall
throughput (number of tasks completed per unit time).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alexander:1987:WCP,
author = "William Alexander and Tom W. Keller and Ellen E.
Boughter",
title = "A workload characterization pipeline for models of
parallel systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "186--194",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The same application implemented on different systems
will necessarily present different workloads to the
systems. Characterizations of workloads intended to
represent the same application, but input to models of
different systems, must also differ in analogous ways.
We present a hierarchical method for characterizing a
workload at increasing levels of detail such that every
characterization at a lower level still accurately
represents the workload at higher levels. We discuss
our experience in using the method to feed the same
application through a workload characterization
``pipeline'' to two different models of two different
systems, a conventional relational database system and
a logic-based distributed database system. We have
developed programs that partially automate the
characterization changes that are required when the
system to be modeled changes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Graf:1987:TBD,
author = "Ingrid M. Graf",
title = "Transformation between different levels of workload
characterization for capacity planning: fundamentals
and case study",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "195--204",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29926",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing network models are effective tools for
capacity planning of computer systems. The base of all
performance oriented questions is the characterization
of the computer system workload. At the capacity
planning level the workload is described in
user-oriented terms. At the system level the queueing
network model requires input parameters, which differ
from the workload description at the capacity planning
level. In this paper a general procedure to transform
the parameters between these two levels is presented
and applied to a case study. The effect on system
performance of an increase in the use of an existing
application system is analysed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ruan:1987:PAF,
author = "Zuwang Ruan and Walter F. Tichy",
title = "Performance analysis of file replication schemes in
distributed systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "205--215",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In distributed systems the efficiency of the network
file system is a key performance issue. Replication of
files and directories can enhance file system
efficiency, but the choice of replication techniques is
crucial. This paper studies a number of replication
techniques, including remote access, prereplication,
weighted voting, and two demand replication schemes:
polling and staling. It develops a Markov chain model,
which is capable of characterizing properties of file
access sequences, including access locality and access
bias. The paper compares the replication techniques
under three different network file system
architectures. The results show that, under reasonable
assumptions, demand replication requires fewer file
transfers than remote access, especially for files that
have a high degree of access locality. Among the demand
replication schemes, staling requires fewer auxiliary
messages than polling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cheriton:1987:NMV,
author = "David R. Cheriton and Carey L. Williamson",
title = "Network measurement of the {VMTP} request-response
protocol in the {V} distributed system",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "216--225",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29928",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Communication systems are undergoing a change in use
from stream to request-response or transaction
communication. In addition, communication systems are
becoming increasingly based on high-speed, low delay,
low error rate channels. These changes call for a new
generation of networks, network interfaces, and
transport protocol design. The performance
characteristics of request-response protocols on these
high-performance networks should guide the design of
this new generation, yet relatively little data of this
nature is available. In this paper, we present some
preliminary measurements of network traffic for a
cluster of workstations connected by Ethernet running
the V distributed operating system. We claim that this
system, with its use of a high-speed local area network
and a request-response transport protocol tuned for
RPC, provides some indication of the performance
characteristics for systems in the next generation of
communication systems. In particular, these
measurements provide an indication of network traffic
patterns, usage characteristics for request-response
protocols, and the behavior of the request-response
protocol itself. These measurements suggest in general
that a key design focus must be on minimizing network
latency and that a request-response protocol is
well-suited for this goal. This focus has implications
for protocol design and implementation as well as for
the design of networks and network interfaces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salehmohamed:1987:PEL,
author = "Mohamed Salehmohamed and W. S. Luk and Joseph G.
Peters",
title = "Performance evaluation of {LAN} sorting algorithms",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "226--233",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29929",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We adapt several parallel sorting algorithms (block
sorting algorithms) and distributed sorting algorithms
for implementation on an Ethernet network with diskless
Sun workstations. We argue that the performance of
sorting algorithms on local area networks (LANs) should
be analyzed in a manner that is different from the ways
that parallel and distributed sorting algorithms are
usually analyzed. Consequently, we propose an empirical
approach which will provide more insight into the
performance of the algorithms. We obtain data on
communication time, local processing time, and response
time (i.e. total running time) of each algorithm for
various file sizes and different numbers of processors.
Comparing the performance data with our theoretical
analysis, we attempt to provide rationale for the
behaviour of the algorithms and project the future
behaviour of the algorithms as file size, number of
processors, or interprocessor communication facilities
change.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Polyzos:1987:DAW,
author = "George C. Polyzos and Mart L. Molle",
title = "Delay analysis of a window tree conflict resolution
algorithm in a local area network environment",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "234--244",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29930",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Expressions are found for the throughput and delay
performance of a Tree Conflict Resolution Algorithm
that is used in a Local Area Network with carrier
sensing (and possibly also collision detection). We
assume that Massey's constant size window algorithm is
used to control access to the channel, and that the
resulting conflicts (if any) are resolved using a
Capetanakis-like preorder traversal tree algorithm with
                 $d$-ary splitting. We develop and solve functional
equations for various performance metrics of the system
and apply the ``Moving Server'' technique to calculate
the main component of the delay. Our results compare
very favorably with those for CSMA protocols, which are
commonly used in Local Area Networks that support
sensing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenker:1987:SCB,
author = "Scott Shenker",
title = "Some conjectures on the behavior of
acknowledgement-based transmission control of random
access communication channels",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "245--255",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29904.29931",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class of acknowledgment-based transmission control
algorithms is considered. In the finite population
case, we claim that algorithms based on backoff
functions which increase faster than linearly but
slower than exponentially are stable up to full channel
capacity, whereas sublinear, exponential, and
superexponential algorithms are not. In addition,
comments are made about the nature of the
quasistationary behavior in the infinite population
case, and about how systems interpolate between the
finite and infinite number of station cases. The
treatment presented here is nonrigorous, consisting of
approximate analytic arguments confirmed by detailed
numerical simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mathys:1987:ECE,
author = "Peter Mathys and Boi V. Faltings",
title = "The effect of channel-exit protocols on the
performance of finite population random-access
systems",
journal = j-SIGMETRICS,
volume = "15",
number = "1",
pages = "256--267",
month = may,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/29903.29932",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:04:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random-access systems (RAS) for collision-type
channels have been studied extensively under the
assumption of an infinite population which generates a
Poisson arrival process. If the population is finite
and if the (practically desirable) free-access
channel-access protocol is used, then it is shown that
the specification of a channel-exit protocol is crucial
for the stability and the fairness of the RAS.
Free-exit and blocked-exit protocols are analyzed and
it is concluded that the p-persistent blocked-exit
protocol provides the mechanisms to assure stability
and fairness for a wide range of arrival process
models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fisher:1987:IIA,
author = "Robert Fisher",
title = "The impact of interactive application development with
{CODESTAR}",
journal = j-SIGMETRICS,
volume = "15",
number = "2",
pages = "13--15",
month = aug,
year = "1987",
CODEN = "????",
DOI = "https://doi.org/10.1145/32100.32101",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many companies are currently plagued with the problem
of not being able to deliver information systems
quickly enough to meet business opportunities.
Management is generally dissatisfied with the
development cycle time, and backlogs are often two
years or more. Texas Instruments has a strategic
program to solve this problem by developing an
integrated set of tools to automate the systems life
cycle of analysis, design, construction and
maintenance, and to reduce associated costs. CODESTAR,
the first major tool to be completed (currently for use
only at TI), addresses both construction and
maintenance. It supports applications ranging from
simple to complex and can be used for the development
of IMS, batch and TSO applications. For example, the
current CODESTAR was developed using the previous
                 CODESTAR. A pilot project assessed the impact of
CODESTAR. The project's scope included the
construction, checkout and installation of a 20-screen
IMS transaction system involving 6,000 lines of code.
The project had originally been designed, scheduled and
budgeted for a non-CODESTAR methodology. Results were
impressive. Both elapsed time and manpower were reduced
by 50 percent, while computer costs decreased
slightly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korner:1988:EED,
author = "Ulf K{\"o}rner and Serge Fdida and Harry Perros and
Gerald Shapiro",
title = "End to end delays in a catenet environment",
journal = j-SIGMETRICS,
volume = "15",
number = "3--4",
pages = "20--28",
month = feb,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041849.1041850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a hierarchical model for a catenet
environment. The model consists of three levels of
models, and it reflects the end to end delay between
two host computers each connected to a different LAN.
The two LANs are connected via gateways by a WAN. The
model incorporates a basic flow control mechanism,
standardized local area network behaviour, as well as
gateway functions in terms of packet fragmentation and
reassembly. The model can be used to obtain performance
measures such as the mean end to end delay and the
system's throughput as a function of parameters such as
arrival rate of packets, maximum window size, and
traffic mix.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:1988:TSA,
author = "Ravi S. Sharma",
title = "Three simple algorithms for the {N/1/F Problem}",
journal = j-SIGMETRICS,
volume = "15",
number = "3--4",
pages = "29--32",
month = feb,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041849.1041851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, various techniques such as Divide and
Conquer, Greedy, and Dynamic Programming are used to
                 solve the N/1/F problem [4]. The algorithms are
presented and proven theoretically. They are also
tested with an example. Complexity analysis is then
performed. These algorithms are different from the
previous ones that solve the same problem in that they
use the basic techniques of Operations Research in
isolation. This simplicity is an attractive feature not
only for purposes of implementation but also in
understanding the problem and its solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analysis of algorithms; computational complexity;
operations modeling; scheduling; software design",
}
@Article{Covington:1988:RPP,
author = "R. C. Covington and S. Madala and V. Mehta and J. R.
Jump and J. B. Sinclair",
title = "The {Rice Parallel Processing Testbed}",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "4--11",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55596",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the most important trends in high performance
computing is the development and general availability
of parallel processing systems. The designers and users
of such systems have the difficult task of utilizing
the available parallelism in both hardware and
algorithms effectively to realize as much performance
improvement as possible over sequential systems. This
requires matching the structure of parallel programs
with the structure of the concurrent system on which
they are to execute. This in turn makes it necessary to
develop performance evaluation techniques that are more
sophisticated and cost-effective than those currently
used. The Rice Parallel Processing Testbed (RPPT), the
subject of this paper, is a major step in this
direction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lubachevsky:1988:EDE,
author = "B. D. Lubachevsky",
title = "Efficient distributed event driven simulations of
multiple-loop networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "12--24",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simulating asynchronous multiple-loop networks is
commonly considered a difficult task for parallel
programming. This paper presents two examples of
asynchronous multiple-loop networks: a stylized queuing
system and an Ising model. The network topology in both
cases is an $ n \times n $ grid on a torus. A new
distributed simulation algorithm is demonstrated on
these two examples. The algorithm combines three
elements: (1) the bounded lag restriction, (2)
precomputed minimal propagation delays, and (3) the
so-called opaque periods. Theoretical performance
evaluation suggests that if $N$ processing elements
(PEs) execute the algorithm in parallel and the
simulated system exhibits sufficient density of events,
                 then, on average, processing one event would require $
\Omega (\log N)$ instructions of one PE. In practice,
the algorithm has achieved substantial speed-ups: the
speed-up is greater than 16 using 25 PEs on a shared
memory MIMD bus computer, and greater than 1900 using
                 $ 2^{14} $ PEs on a SIMD computer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lucier:1988:PEM,
author = "B. J. Lucier",
title = "Performance evaluation for multiprocessors programmed
using monitors",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "22--29",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55598",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a classification of synchronization delays
inherent in multiprocessor systems programmed using the
monitor paradigm. This characterization is useful in
relating performance of such systems to algorithmic
parameters in subproblems such as domain decomposition.
We apply this approach to a parallel, adaptive grid
code for solving the equations of one-dimensional gas
dynamics implemented on shared memory multiprocessors
such as the Encore Multimax.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ganz:1988:QAF,
author = "A. Ganz and I. Chlamtac",
title = "Queueing analysis of finite buffer token networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "30--36",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55599",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper introduces analytic models for evaluating
demand assignment protocols in realistic finite
buffer/finite station network configurations. We
present a solution for implicit and explicit token
passing systems enabling us to model local area
networks, such as Token Bus. We provide, for the first
time, a tractable approximate solution by using an
approach based on restricted occupancy urn models. The
presented approximation involves the solving of linear
equations whose number is linear and equal only to the
number of buffers in the system. It is demonstrated
that in addition to its simplicity, the presented
approximation is also highly accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zafirovic-Vukotic:1988:PMH,
author = "M. Zafirovic-Vukotic and I. G. M. M. Niemegeers",
title = "Performance modelling of a {HSLAN} slotted ring
protocol",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "37--46",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The slotted ring protocol which is evaluated in this
paper is suitable for use at very large transmission
rates. In terms of modelling it is a multiple cyclic
server system. A few approximative analytical models of
this protocol are presented and evaluated vs the
simulation in this paper. The cyclic server model shows
to be the most accurate and usable over a wide range of
parameters. A performance analysis based on this model
is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiu:1988:CSD,
author = "D.-M. Chiu and R. Sudama",
title = "A case study of {DECnet} applications and protocol
performance",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "47--55",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55602",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is a study based on measurements of network
activities of a major site of Digital's world-wide
corporate network. The study yields two kinds of
results: (1) DECnet protocol performance information
and (2) DECnet session statistics. Protocol performance
is measured in terms of the various network overhead
(non-data) packets in routing, transport and session
layers. From these protocol performance data, we are
able to review how effective various network protocol
optimizations are; for example the on/off flow control
scheme and the delayed acknowledgement scheme in the
transport protocol. DECnet session statistics
characterizes the workload in such a large network. The
attributes of a session include the user who started
it, the application invoked, the distance between the
user and the application, the time span, the number of
packets and bytes in each direction, and the various
reasons if a session is not successfully established.
Based on a large sample of such sessions, we generate
distributions based on various attributes of sessions;
for example the application mix, the visit count
distribution and various packet number and size
distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenker:1988:AAL,
author = "S. Shenker and A. Weinrib",
title = "Asymptotic analysis of large heterogeneous queueing
systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "56--62",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55603",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As a simple example of a large heterogeneous queueing
system, we consider a single queue with many servers
with differing service rates. In the limit of
infinitely many servers, we identify a queue control
policy that minimizes the average system delay. When
there are only two possible server speeds, we can
analyze the convergence of this policy to optimality.
Based on this result, we propose policies for large but
finite systems with a general distribution of server
speeds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eager:1988:LPB,
author = "D. L. Eager and E. D. Lazowska and J. Zahorjan",
title = "The limited performance benefits of migrating active
processes for load sharing",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "63--72",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55604",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Load sharing in a distributed system is the process of
transparently sharing workload among the nodes in the
system to achieve improved performance. In
non-migratory load sharing, jobs may not be transferred
once they have commenced execution. In load sharing
with migration, on the other hand, jobs in execution
may be interrupted, moved to other nodes, and then
resumed. In this paper we examine the performance
benefits offered by migratory load sharing beyond those
offered by non-migratory load sharing. We show that
while migratory load sharing can offer modest
performance benefits under some fairly extreme
conditions, there are no conditions under which
migration yields major performance benefits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:1988:LGA,
author = "J. Hong and X. Tan and M. Chen",
title = "From local to global: an analysis of nearest neighbor
balancing on hypercube",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "73--82",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55605",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper will focus on the issue of load balancing
on a hypercube network of $N$ processors. We will
investigate a typical nearest neighbor balancing
strategy --- in which workloads among neighboring
processors are averaged at discrete time steps. The
computation model allows tasks, described by
independent random variables, to be generated and
terminated at all times. We assume that the random
variables at all nodes have equal expected value and
their variances are bounded by a constant $ d^2 $, and we
let the difference DIFF between the actual load on each
node and the average load on the system describe the
deviation of the load on a node from the global average
value. The following analytical results are obtained:
The expected value of DIFF, denoted by E(DIFF), is 0.
The variance of DIFF, denoted by Var(DIFF), is
independent of time $t$, and Var(DIFF) $ \leq 1.386 d^2
+ 0.231 \log N$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:1988:ALM,
author = "K. Kant",
title = "Application level modeling of parallel machines",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "83--93",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55606",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the application level
performance modeling of parallel machines consisting of
a large number of processing elements (PE's) connected
in some regular structure such as mesh, tree,
hypercube, etc. There are $K$ problem types, each
arriving according to a Poisson process, and each of
which needs a PE substructure of some given size and
topology. Thus several problems can run on the machine
simultaneously. It is desired to characterize the
performance of such a system under various types of
allocation schemes. We show that if the queueing is
considered external to our model, it is possible to
construct a Markovian model with local balance
property. The time for which a substructure is held by
a problem could be generally distributed. The model can
be solved efficiently using standard techniques;
however, because of rather complex structure of the
state space, its direct enumeration is difficult to
avoid. We also show how the size of the state space can
be reduced when the set of allowed substructures is
highly regular. We then show how queueing delays can be
modeled approximately. Finally, we consider the
solution of models involving shared resources such as
global memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Born:1988:ADP,
author = "R. G. Born and J. R. Kenevan",
title = "Analytic derivation of processor potential utilization
in straight line, ring, square mesh, and hypercube
networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "94--103",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55607",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In multicomputer architectures, in which processors
communicate through message-passing, the overhead
encountered because of the need to relay messages can
significantly affect performance. Based upon some
simplifying assumptions including the rate at which a
processor generates messages being proportional to its
current potential utilization, processor utilizations
are analytically derived in matrix form for a
bidirectional straight line and square mesh. In
addition, closed form derivations are provided for a
unidirectional ring and an $n$-dimensional hypercube.
Finally, the theoretical results are found to be in
close agreement with discrete-event simulations of the
four architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Majumdar:1988:SMP,
author = "S. Majumdar and D. L. Eager and R. B. Bunt",
title = "Scheduling in multiprogrammed parallel systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "104--113",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55608",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Processor scheduling on multiprocessor systems that
simultaneously run concurrent applications is currently
not well-understood. This paper reports a preliminary
investigation of a number of fundamental issues which
are important in the context of scheduling concurrent
jobs on multiprogrammed parallel systems. The major
motivation for this research is to gain insight into
system behaviour and understand the basic principles
underlying the performance of scheduling strategies in
such parallel systems. Based on abstract models of
systems and scheduling disciplines, several high level
issues that are important in this context have been
analysed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patel:1988:HSC,
author = "N. M. Patel and P. G. Harrison",
title = "On hot-spot contention in interconnection networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "114--123",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55609",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major component of a parallel machine is its
interconnection network, which provides concurrent
communication between the processing elements. It is
common to use a multi-stage interconnection network
(MIN) which is constructed using crossbar switches and
introduces not only contention for destination
addresses but also additional contention for internal
switches. Both types of contention are increased when
non-local communication across a MIN becomes
concentrated on a certain destination address, for
example when a frequently-accessed data structure is
stored entirely in one element of a distributed memory.
Such an address, often called a hot-spot, affects the
blocking probability of paths to other destination
addresses because of the shared internal switches. This
paper describes an analytical model of hot-spot
contention and quantifies its effect on the performance
of a MIN with a circuit switching communication
protocol. We obtain performance measures for a MIN in
which partial paths are held during path building and
one destination address is more frequently chosen by
incoming traffic than other addresses.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kothari:1988:PAM,
author = "S. C. Kothari and A. Jhunjhunwala and A. Mukherjee",
title = "Performance analysis of multipath multistage
interconnection networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "124--132",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55610",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper closely examines the performance analysis
for unbuffered multipath multistage interconnection
networks. A critical discussion of commonly used
analysis is provided to identify a basic flaw in the
model. A new analysis based on the grouping of
alternate links is proposed as an alternative to
rectify the error. The results based on the new
analysis and extensive simulation are presented for
three representative networks. The simulation study
strongly supports the results of the new analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Melus:1988:MPE,
author = "J. L. Mel{\'u}s and E. Sanvicente and J.
Magri{\~n}{\'a}",
title = "Modelling and performance evaluation of multiprocessor
based packet switches",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "133--140",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55611",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an approximate analytic model for
the performance analysis of a class of multiprocessor
based packet switches. For these systems, processors
and common memory modules are grouped in clusters, each
of them composed of several processor-memory pairs that
communicate through a multiple bus interconnection
network. Intercluster communication is also achieved
using one or more busses. The whole network operates in
a circuit-switched mode. After access completion, a
processor remains active for an exponentially
distributed random time. Access times are also
exponential with different means, depending upon the
location (local, cluster, external) of the referenced
module. The arbitration is done on a priority basis.
The performance is predicted by computing the average
number of switched packets per time unit. Other related
indexes are also given. Numerical results are obtained
rather easily by solving a set of two algebraic
equations. Simulation is used to validate the accuracy
of the approximations used in the model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1988:MCP,
author = "T. P. Lee",
title = "A manufacturing capacity planning experiment through
functional workload decomposition",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "141--150",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55612",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe an experiment to evaluate a
distributed architecture via functional database
workload decomposition. A workload in a circuit pack
assembly environment was decomposed and mapped onto a
frontend/backend distributed computer architecture. To
evaluate this distributed architecture, an operational
model for capacity planning was devised, and
performance and cost-effectiveness measures were
chosen. Model parameters were estimated through
benchmark experiments in a distributed system
consisting of various super-microcomputers connected by
a CSMA/CD local area network with INGRES as the
database management system. The frontend/backend
architecture consists of a backend data repository and
analysis computer system and a few frontend computer
systems dedicated for data collection and manufacturing
process verification. Because of the significant
software overhead in communication protocol and
database processing, information exchange was batched
between the backend and frontend systems to amortize
such cost to improve overall system performance.
Results of the experiments were analyzed to gain
quantitative insight on the feasibility of such
decomposition and its mapping onto the proposed
architecture. With sufficient batching, the proposed
distributed architecture not only has more overall
system capacity, but also is more cost-effective than
the typical centralized architecture. The approach
described is applicable in more general contexts.
Advantages of such distributed systems include the
relative robustness of the distributed architecture
under single point failure mode and the ease of
capacity growth by upgrading the computer systems
and/or by increasing the number of frontend systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Irgon:1988:FLS,
author = "A. E. Irgon and A. H. {Dragoni, Jr.} and T. O.
Huleatt",
title = "{FAST}: a large scale expert system for application
and system software performance tuning",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "151--156",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55613",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alexander:1988:CDC,
author = "W. Alexander and G. Copeland",
title = "Comparison of dataflow control techniques in
distributed data-intensive systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "157--166",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55614",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In dataflow architectures, each dataflow node (i.e.,
operation) is typically executed on a single physical
node. We are concerned with distributed data-intensive
systems, in which each base (i.e., persistent) set of
data has been declustered over many physical nodes to
achieve load balancing. Because of large base set size,
each operation is executed where the base set resides,
and intermediate results are transferred between
physical nodes. In such systems, each dataflow node is
typically executed on many physical nodes. Furthermore,
because computations are data-dependent, we cannot know
until run time which subset of the physical nodes
containing a particular base set will be involved in a
given dataflow node. This uncertainty affects program
loading, task activation and termination, and data
transfer among the nodes. In this paper we focus on the
problem of how a dataflow node in such an environment
knows when it has received data from all the physical
nodes from which it is ever going to receive. We call
this the dataflow control problem. The interesting part
of the problem is trying to achieve correctness
efficiently. We propose three solutions to this
problem, and compare them quantitatively by the metrics
of total message traffic, message system throughput and
data transfer response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leutenegger:1988:MVP,
author = "S. T. Leutenegger and M. K. Vernon",
title = "A mean-value performance analysis of a new
multiprocessor architecture",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "167--176",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55615",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a preliminary performance analysis
of a new large-scale multiprocessor: the Wisconsin
Multicube. A key characteristic of the machine is that
it is based on shared buses and a snooping cache
coherence protocol. The organization of the shared
buses and shared memory is unique and non-hierarchical.
The two-dimensional version of the architecture is
envisioned as scaling to 1024 processors. We develop an
approximate mean-value analysis of bus interference for
the proposed cache coherence protocol. The model
includes FCFS scheduling at the bus queues with
deterministic bus access times, and asynchronous memory
write-backs and invalidation requests. We use our model
to investigate the feasibility of the multiprocessor,
and to study some initial system design issues. Our
results indicate that a 1024-processor system can
operate at 75--95\% of its peak processing power, if
the mean time between cache misses is larger than 1000
bus cycles (i.e. 50 microseconds for 20 MHz buses; 25
microseconds for 40 MHz buses). This miss rate is not
unreasonable for the cache sizes specified in the
design, which are comparable to main memory sizes in
existing multiprocessors. We also present results which
address the issues of optimal cache block size, optimal
size of the two-dimensional Multicube, the effect of
broadcast invalidations on system performance, and the
viability of several hardware techniques for reducing
the latency for remote memory requests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blake:1988:SAR,
author = "J. T. Blake and A. L. Reibman and K. S. Trivedi",
title = "Sensitivity analysis of reliability and performability
measures for multiprocessor systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "177--186",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55616",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional evaluation techniques for multiprocessor
systems use Markov chains and Markov reward models to
compute measures such as mean time to failure,
reliability, performance, and performability. In this
paper, we discuss the extension of Markov models to
include parametric sensitivity analysis. Using such
analysis, we can guide system optimization, identify
parts of a system model sensitive to error, and find
system reliability and performability bottlenecks. As
an example we consider three models of a 16 processor,
16 memory system. A network provides communication
between the processors and the memories. Two
crossbar-network models and the Omega network are
considered. For these models, we examine the
sensitivity of the mean time to failure, unreliability,
and performability to changes in component failure
rates. We use the sensitivities to identify bottlenecks
in the three system models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukkamala:1988:DPR,
author = "R. Mukkamala and S. C. Bruell and R. K. Shultz",
title = "Design of partially replicated distributed database
systems: an integrated methodology",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "187--196",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55617",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The objective of this research is to develop and
integrate tools for the design of partially replicated
distributed database systems. Many existing tools are
inappropriate for designing large-scale distributed
databases due to their large computational
requirements. Our goal is to develop tools that solve
the design problems reasonably quickly, typically by
using heuristic algorithms that provide approximate or
near-optimal solutions. In developing this design
methodology, we assume that information regarding the
types of user requests and their rates of arrival into
the system is known a priori. The methodology assumes a
general model for transaction execution. In this paper
we discuss three aspects of the design methodology: the
data allocation problem, the use of a static
load-balancing scheme in coordination with the
allocation scheme, and the design evaluation and review
step. Our methodology employs iterative design
techniques using performance evaluation as a means to
iterate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wybranietz:1988:MPM,
author = "D. Wybranietz and D. Haban",
title = "Monitoring and performance measuring distributed
systems during operation",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "197--206",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an integrated tool for monitoring
distributed systems continuously during operation. A
hybrid monitoring approach is used. As special hardware
support a test and measurement processor (TMP) was
designed, which is part of each node in an experimental
multicomputer system. Each TMP runs local parts of the
monitoring software for its node, while all the TMPs
are connected to a central test station via a separate
TMP interconnection network. The monitoring system is
transparent to users. It permanently observes system
behavior, measures system performance and records
system information. The immense amount of information
is graphically displayed in easy-to-read-charts and
graphs in an application-oriented manner. The tools
promote an improved understanding of run time behavior
and performance measurements to derive qualitative and
even quantitative assessments about distributed
systems. A prototype of the monitoring facility is
operational and currently experiments are being
conducted in our distributed system consisting of
several MC68000 microcomputers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Melvin:1988:UMI,
author = "S. W. Melvin and Y. N. Patt",
title = "The use of microcode instrumentation for development,
debugging and tuning of operating system kernels",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "207--214",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55619",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have developed a tool based on microcode
modifications to a VAX 8600 which allows a wide variety
of operating system measurements to be taken with
minimal perturbation and without the need to modify any
operating system software. A trace of interrupts,
exceptions, system calls and context switches is
generated as a side-effect to normal execution. In this
paper we describe the tool we have developed and
present some results we have gathered under both UNIX
4.3 BSD and VAX/VMS V4.5. We compare the process fork
behavior of two different command shells under UNIX,
look at context switch rates for interactive and batch
workloads and generate a histogram for network
interrupt service time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agawal:1988:MRC,
author = "A. Agarwal and A. Gupta",
title = "Memory-reference characteristics of multiprocessor
applications under {MACH}",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "215--225",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55620",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Shared-memory multiprocessors have received wide
attention in recent times as a means of achieving
high-performance cost-effectively. Their viability
requires a thorough understanding of the memory access
patterns of parallel processing applications and
operating systems. This paper reports on the memory
reference behavior of several parallel applications
running under the MACH operating system on a
shared-memory multiprocessor. The data used for this
study is derived from multiprocessor address traces
obtained from an extended ATUM address tracing scheme
implemented on a 4-CPU DEC VAX 8350. The applications
include parallel OPS5, logic simulation, and a VLSI
wire routing program. Among the important issues
addressed in this paper are the amount of sharing in
user programs and in the operating system, comparing
the characteristics of user and system reference
patterns, sharing related to process migration, and the
temporal, spatial, and processor locality of shared
blocks. We also analyze the impact of shared references
on cache coherence in shared-memory multiprocessors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Murphy:1988:CPB,
author = "J. M. Murphy and R. B. Bunt",
title = "Characterising program behaviour with phases and
transitions",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "226--234",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55621",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A detailed quantitative study of program behaviour is
described. Reference strings from a representative set
of programs were decomposed into phases and
transitions. Referencing behaviour is studied at both
the macro level (program-wide) and the micro level
(within the phases and transitions). Quantitative data,
suitable for the parameterization of program behaviour
models, is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yoshizawa:1988:ASC,
author = "Y. Yoshizawa and T. Arai",
title = "Adaptive storage control for page frame supply in
large scale computer systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "235--243",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A real storage management algorithm called Adaptive
Control of Page-frame Supply (ACPS) is described. ACPS
employs three strategies: prediction of the demand
for real page frames, page replacement based on the
prediction, and working set control. Together, these
strategies constitute the real page frame allocation
method, and contribute to short and stable response
times in conversational processing environments. ACPS
is experimentally applied to the VOS3 operating system.
Evaluation of ACPS on a real machine shows that TSS
response times are not affected too strongly by
king-size jobs and ACPS is successful in avoiding
paging delay and thrashing. ACPS prevents extreme
shortages of real storage in almost all cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pattipati:1988:PAM,
author = "K. R. Pattipati and M. M. Kostreva",
title = "On the properties of approximate mean value analysis
algorithms for queueing networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "244--252",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/55595.55623",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents new formulations of the
approximate mean value analysis (MVA) algorithms for
the performance evaluation of closed product-form
queueing networks. The key to the development of the
algorithms is the derivation of vector nonlinear
equations for the approximate network throughput. We
solve this set of throughput equations using a
nonlinear Gauss--Seidel type distributed algorithm,
coupled with a quadratically convergent Newton's method
for scalar nonlinear equations. The throughput
equations have enabled us to: (a) derive bounds on the
approximate throughput; (b) prove the existence,
uniqueness, and convergence of the Schweitzer--Bard
(S-B) approximation algorithm for a wide class of
monotone, single class networks, (c) establish the
existence of the S-B solution for multi-class, monotone
networks, and (d) prove the asymptotic (i.e., as the
number of customers of each class tends to {\infty})
uniqueness of the S-B throughput solution, and the
asymptotic convergence of the various versions of the
distributed algorithms in multi-class networks with
single server and infinite server nodes. The asymptotic
convergence is established using results from convex
programming and convex duality theory. Extension of our
algorithms to mixed networks is straightforward. Only
multi-class results are presented in this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tantawi:1988:OAM,
author = "A. N. Tantawi and D. Towsley and J. Wolf",
title = "Optimal allocation of multiple class resources in
computer systems",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "253--260",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55624",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A class-constrained resource allocation problem is
considered. In this problem, a set of $M$ heterogeneous
resources is to be allocated optimally among a set of
$L$ users belonging to $K$ user classes. A set of class
allocation constraints, which limit the number of users
of a given class that could be allocated to a given
resource, is imposed. An algorithm with worst case time
complexity $ O(M (L M + M^2 + L K))$ is presented along
with a proof of its correctness. This problem arises in
many areas of resource management in computer systems,
such as load balancing in distributed systems,
transaction processing in distributed database systems,
and session allocation in time-shared computer systems.
We illustrate the behavior of this algorithm with an
example where file servers are to be allocated to
workstations of multiple classes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hsieh:1988:PNA,
author = "C.-H. Hsieh and S. S. Lam",
title = "{PAM} --- a noniterative approximate solution method
for closed multichain queueing networks",
journal = j-SIGMETRICS,
volume = "16",
number = "1",
pages = "261--269",
month = may,
year = "1988",
CODEN = "????",
DOI = "https://doi.org/10.1145/1007771.55625",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:05:57 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Approximate MVA algorithms for separable queueing
networks are based upon an iterative solution of a set
of modified MVA formulas. Although each iteration has a
computational time requirement of $ O(M K^2) $ or less,
many iterations are typically needed for convergence to
a solution. ($M$ denotes the number of queues and $K$
the number of closed chains or customer classes.) We
present some faster approximate solution algorithms
that are noniterative. They are suitable for the
analysis and design of communication networks which may
require tens to hundreds, perhaps thousands, of closed
chains to model flow-controlled virtual channels. Three
PAM algorithms of increasing accuracy are presented.
Two of them have time and space requirements of $ O(M
K)$. The third algorithm has a time requirement of $
O(M K^2)$ and a space requirement of $ O(M K)$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1989:LBD,
author = "Anna Ha{\'c}",
title = "Load balancing in distributed systems: a summary",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "17--19",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most distributed systems are characterized by
distribution of both physical and logical features. The
architecture of a distributed system is usually
modular. Most distributed systems support a varying
number of processing elements. The system hardware,
software, data, user software and user data are
distributed across the system. An arbitrary number of
system and user processes can be executed on various
machines in the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1989:KBD,
author = "Anna Ha{\'c}",
title = "Knowledge-based distributed system architecture",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "20--20",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper introduces the learning system, the expert
system and an information broadcasting protocol for
designing and managing distributed systems. A
knowledge-based system can be implemented as a part of
operating system software to make decisions about
process transfer and message routing in a hierarchical
network. A knowledge-based system uses dynamic
information about the state of processors and
applications in the local and wide area network. This
information consists of processors' and applications'
queue lengths, and it is broadcast to directly
connected processors. The expert system uses broadcast
information to make decisions about process transfer
and message routing, considering processor availability
and system security. The expert system causes
processors' queue lengths to become balanced on each
network hierarchy level. The number of process
transfers is calculated and depends on network
partitioning and the threshold values used by the
expert system. The convergence of the algorithms for
the knowledge-based system is proven. Performance of
the proposed system is evaluated analytically using the
elapsed time of process transfer or message transfer
and the waiting time to begin transfer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1989:DAA,
author = "Anna Ha{\'c}",
title = "Design algorithms for asynchronous operations in cache
memory",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "21--21",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The algorithms used to operate on disk buffer cache
memory have significant impact on operating system
performance. The buffer cache size, the size of the
file being written, the disk access time, and the
algorithms used to append updated blocks from the
buffer cache to the disk queue determine performance of
operations in disk cache memory. The determination of
these algorithms is particularly important since they
are implemented in the system kernel and cannot be
changed by the user or system administrator. This paper
introduces new algorithms for asynchronous operations
in disk buffer cache memory. These algorithms allow for
writing the files into the buffer cache by the
processes. The number of active processes in the system
and the length of the queue to the disk buffer cache
are considered in the algorithm design. This
information is obtained dynamically during the
execution of the algorithms. The performance of the
operations on the buffer cache is improved by using the
algorithms, which allow for writing the contents of the
buffer cache to the disk depending on the system load
and the write activity. The elapsed time of writing a
file into the buffer cache is calculated. The waiting
time to start writing a file is also considered. It is
shown that the elapsed time of writing a file decreases
by using the algorithms, which write the blocks to the
disk depending on the rate of write operations and the
number of active processes in the system. The time for
a block to become available for update in the buffer
cache is given. The number of blocks available for
update in the buffer cache is derived. The performance
of the algorithms is compared. It is shown that the
proposed algorithms allow for better performance than
an algorithm that does not use the information about
the system load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schneider:1989:AHS,
author = "Victor Schneider",
title = "Approximations for the {Halstead} software science
software error rate and project effort estimators",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "22--29",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Experimental estimators are presented relating the
expected number of software errors ($B$) in a software
development project to\par
$ \bullet $ the overall reported months of programmer
effort for the project $ (E)$ \par
$ \bullet $ the number of subprograms $ (n)$ \par
$ \bullet $ the count of thousands of coded source
statements $ (S)$.\par
These estimators are $ B \approx 7.6 E^{0.667}
S^{0.333}$ and $ B \approx n ((S / n) /
0.047)^{1.667}$.\par
These estimators are shown to be consistent with data
obtained from the Air Force Rome Air Development
Center, the Naval Research Laboratory, and Fujitsu
Corporation. It is suggested here that more data is
needed to refine these estimators further.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Domanski:1989:PBE,
author = "Bernard Domanski",
title = "A {PROLOG}-based expert system for tuning {MVS\slash
XA}",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "30--47",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper will discuss some of the issues involved in
building an Expert System that embodies tuning rules
for IBM's MVS/XA operating system. To understand the
components of an Expert System and their functions,
PROLOG on an IBM PC (Turbo-PROLOG from Borland
International) was chosen as the development
environment. The paper will begin by defining the key
concepts about Expert Systems, Knowledge Engineering,
and Knowledge Acquisition. The reader will be given a
brief overview of PROLOG, from which we can explain how
an inference mechanism was developed. Finally, the
paper will describe the Expert System that was
developed, and additionally will provide a set of key
issues that should be addressed in the future. It is
our overall objective to provide new insight into the
application of AI to CPE.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Irvin:1989:QML,
author = "David R. Irvin",
title = "A queueing model for local area network bridges",
journal = j-SIGMETRICS,
volume = "16",
number = "2--4",
pages = "48--57",
month = feb,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041911.1041917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:07:49 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The buffer needed to match the transmission speeds of
two different local area networks interconnected by a
MAC-layer bridge is modeled as a G/M/1 queue. To
account for the problems caused by the arrivals of
traffic bursts from the higher-speed network, traffic
interarrival times are assumed to follow a
hyperexponential probability density function.
Selecting parameters for the hyperexponential
distribution to model realistic traffic conditions is
examined. A hypothetical bridge is discussed as an
example. Queue length for the G/M/1 system with
hyperexponential interarrivals is shown to depend
primarily on the persistence of bursts on the
higher-speed network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolf:1989:POP,
author = "J. Wolf",
title = "The placement optimization program: a practical
solution to the disk file assignment problem",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "1--10",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe a practical mathematical
formulation and solution of the so-called ``File
Assignment Problem'' (FAP) for computer disks. Our FAP
solution has been implemented in a PL/I program known
as the Placement Optimization Program (POP). The
algorithm consists of three major components --- two
heuristic optimization models and a queueing network
model. POP has been used in validation studies to
assign files to disks in two IBM MVS complexes. The
resulting savings in I/O response times were 22\% and
25\%, respectively. Throughout the paper we shall
emphasize the real-world nature of our approach to the
disk FAP, which we believe sets it apart from previous
attempts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kearns:1989:DDR,
author = "J. P. Kearns and S. DeFazio",
title = "Diversity in database reference behavior",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "11--19",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75374",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the past fifteen years, empirical studies of the
reference behavior of a number of database systems have
produced seemingly contradictory results. The presence
or absence of locality of reference and sequentiality
have both been reported (or denied) in various papers.
As such, the performance analyst or database
implementor is left with little concrete guidance in
the form of expected reference behavior of a database
system under a realistic workload. We present empirical
evidence that all of the previous results about
database reference behavior are correct (or incorrect).
That is, if the database reference sequence is viewed
on a per-transaction instance or per-database basis,
almost any reference behavior is discernible. Previous
results which report the absolute absence or presence
of a certain form of reference behavior were almost
certainly derived from reference traces which were
dominated by transactions or databases which exhibited
a certain behavior. Our sample consists of roughly
twenty-five million block references, from 350,000
transaction executions, directed at 175 operational
on-line databases at two major corporations. As such,
the sample is an order of magnitude more comprehensive
than any other reported in the literature. We also
present evidence that reference behavior is predictable
and exploitable when viewed on a per-transaction basis
or per-database basis. The implications of this
predictability for effective buffer management are
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:1989:SAD,
author = "J. Hellerstein",
title = "A statistical approach to diagnosing intermittent
performance-problems using monotone relationships",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "20--28",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Managing a computer system requires that good
performance (e.g., large throughputs, small response
times) be maintained in order to meet business
objectives. Rarely is performance consistently bad.
More frequently, performance is good one day and bad
the next. Diagnosing such intermittent
performance-problems involves determining what
distinguishes bad days from good days, such as larger
paging rates. Once this is understood, an appropriate
remedy can be found, such as buying more memory. This
paper describes a statistical approach to diagnosing
intermittent performance-problems when the
relationships among measurement variables are expressed
qualitatively as monotone relationships (e.g., paging
delays increase with the number of logged-on users). We
present a non-parametric test for monotonicity (NTM)
that evaluates monotone relationships based on FA, the
fraction of observation-pairs that agree with the
monotone relationship. An interpretation of FA in terms
of statistical significance levels is presented, and
NTM is compared to least-squares regression. Based on
NTM, an algorithm for diagnosing intermittent
performance-problems is presented. NTM and our
diagnosis algorithm are applied to measurements of four
similarly configured IBM 9370 model 60s running IBM's
operating-system Virtual Machine System Product (VM
SP).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Muntz:1989:BAR,
author = "R. R. Muntz and E. {de Souza e Silva} and A. Goyal",
title = "Bounding availability of repairable computer systems",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "29--38",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov models are widely used for the analysis of
availability of computer/communication systems.
Realistic models often involve state space
cardinalities that are so large that it is impractical
to generate the transition rate matrix let alone solve
for availability measures. Various state space
reduction methods have been developed, particularly for
transient analysis. In this paper we present an
approximation technique for determining steady state
availability. Of particular interest is that the method
also provides bounds on the error. Examples are given
to illustrate the method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bubenik:1989:POM,
author = "R. Bubenik and W. Zwaenepoel",
title = "Performance of optimistic make",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "39--48",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optimistic make is a version of make that executes the
commands necessary to bring targets up-to-date prior to
the time the user types a make request. Side effects of
these optimistic computations (such as file or screen
updates) are concealed until the make request is
issued. If the inputs read by the optimistic
computations are identical to the inputs the
computation would read at the time the make request is
issued, the results of the optimistic computations are
used immediately, resulting in improved response time.
Otherwise, the necessary computations are reexecuted.
We have implemented optimistic make in the V-System on
a collection of SUN-3 workstations. Statistics
collected from this implementation are used to
synthesize a workload for a discrete-event simulation
and to validate its results. The simulation shows a
speedup distribution over pessimistic make with a
median of 1.72 and a mean of 8.28. The speedup
distribution is strongly dependent on the ratio between
the target out-of-date times and the command execution
times. In particular, with faster machines the median
of the speedup distribution grows to 5.1, and then
decreases again. The extra machine resources used by
optimistic make are well within the limit of available
resources, given the large idle times observed in many
workstation environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anderson:1989:PIT,
author = "T. E. Anderson and E. D. Lazowska and H. M. Levy",
title = "The performance implications of thread management
alternatives for shared-memory multiprocessors",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "49--60",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75378",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Threads (``lightweight'' processes) have become a
common element of new languages and operating systems.
This paper examines the performance implications of
several data structure and algorithm alternatives for
thread management in shared-memory multiprocessors.
Both experimental measurements and analytical model
projections are presented. For applications with
fine-grained parallelism, small differences in thread
management are shown to have significant performance
impact, often posing a tradeoff between throughput and
latency. Per-processor data structures can be used to
improve throughput, and in some circumstances to avoid
locking, improving latency as well. The method used by
processors to queue for locks is also shown to affect
performance significantly. Normal methods of critical
resource waiting can substantially degrade performance
with moderate numbers of waiting processors. We present
an Ethernet-style backoff algorithm that largely
eliminates this effect.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carter:1989:OIB,
author = "J. B. Carter and W. Zwaenepoel",
title = "Optimistic implementation of bulk data transfer
protocols",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "61--69",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During a bulk data transfer over a high speed network,
there is a high probability that the next packet
received from the network by the destination host is
the next packet in the transfer. An optimistic
implementation of a bulk data transfer protocol takes
advantage of this observation by instructing the
network interface on the destination host to deposit
the data of the next packet immediately into its
anticipated final location. No copying of the data is
required in the common case, and overhead is greatly
reduced. Our optimistic implementation of the V kernel
bulk data transfer protocols on SUN-3/50 workstations
connected by a 10 megabit Ethernet achieves peak
process-to-process data rates of 8.3 megabits per
second for 1-megabyte transfers, and 6.8 megabits per
second for 8-kilobyte transfers, compared to 6.1 and
5.0 megabits per second for the pessimistic
implementation. When the reception of a bulk data
transfer is interrupted by the arrival of unexpected
packets at the destination, the worst-case performance
of the optimistic implementation is only 15 percent
less than that of the pessimistic implementation.
Measurements and simulation indicate that for a wide
range of load conditions the optimistic implementation
outperforms the pessimistic implementation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stunkel:1989:TPT,
author = "C. B. Stunkel and W. K. Fuchs",
title = "{TRAPEDS}: producing traces for multicomputers via
execution driven simulation",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "70--78",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trace-driven simulation is an important aid in
performance analysis of computer systems. Capturing
address traces for these simulations is a difficult
problem for single processors and particularly for
multicomputers. Even when existing trace methods can be
used on multicomputers, the amount of collected data
typically grows with the number of processors, so I/O
and trace storage costs increase. A new technique is
presented in this paper which modifies the executable
code to dynamically collect the address trace from the
user code and analyzes this trace during the execution
of the program. This method helps resolve the I/O and
storage problems and facilitates parallel analysis of
the address trace. If a trace stored on disk is
desired, the generated trace information can also be
written to files during execution, with a resultant
drop in program execution speed. An initial
implementation on the Intel iPSC/2 hypercube
multicomputer is detailed, and sample simulation
results are presented. The effect of this trace
collection method on execution time is illustrated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gallivan:1989:BCM,
author = "K. Gallivan and D. Gannon and W. Jalby and A. Malony
and H. Wijshoff",
title = "Behavioral characterization of multiprocessor memory
systems: a case study",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "79--88",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The speed and efficiency of the memory system is a key
limiting factor in the performance of supercomputers.
Consequently, one of the major concerns when developing
a high-performance code, either manually or
automatically, is determining and characterizing the
influence of the memory system on performance in terms
of algorithmic parameters. Unfortunately, the
performance data available to an algorithm designer
such as various benchmarks and, occasionally,
manufacturer-supplied information, e.g. instruction
timings and architecture component characteristics, are
rarely sufficient for this task. In this paper, we
discuss a systematic methodology for probing the
performance characteristics of a memory system via a
hierarchy of data-movement kernels. We present and
analyze the results obtained by such a methodology on a
cache-based multi-vector processor (Alliant FX/8).
Finally, we indicate how these experimental results can
be used for predicting the performance of simple
Fortran codes by a combination of empirical
observations, architectural models and analytical
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Samples:1989:MNL,
author = "A. D. Samples",
title = "{Mache}: no-loss trace compaction",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "89--97",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75382",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Execution traces can be significantly compressed using
their referencing locality. A simple observation leads
to a technique capable of compressing execution traces
by an order of magnitude; instruction-only traces are
compressed by two orders of magnitude. This technique
is unlike previously reported trace compression
techniques in that it compresses without loss of
information and, therefore, does not affect
trace-driven simulation time or accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:1989:ERS,
author = "A. Mukherjee and L. H. Landweber and J. C.
Strikwerda",
title = "Evaluation of retransmission strategies in a local
area network environment",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "98--107",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an evaluation of retransmission strategies
over local area networks. Expressions are derived for
the expectation and the variance of the transmission
time of the go-back-n and the selective repeat
protocols in the presence of errors. These are compared
to the expressions for blast with full retransmission
on error (BFRE) derived by Zwaenepoel [Zwa 85]. We
conclude that go-back-n performs almost as well as
selective repeat and is very much simpler to implement
while BFRE is stable only for a limited range of
message sizes and error rates. We also present a
variant of BFRE which optimally checkpoints the
transmission of a large message. This is shown to
overcome the instability of ordinary BFRE. It has a
simple state machine and seems to take full advantage
of the low error rates of local area networks. We
further investigate go-back-n by generalizing the
analysis to an upper layer transport protocol, which is
likely to encounter among other things, variable delays
due to protocol overhead, multiple connections, process
switches and operating system scheduling priorities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Danzig:1989:FBF,
author = "P. B. Danzig",
title = "Finite buffers for fast multicast",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "108--117",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75384",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When many or all of the recipients of a multicast
message respond to the multicast's sender, their
responses may overflow the sender's available buffer
space. Buffer overflow is a serious, known problem of
broadcast-based protocols, and can be troublesome when
as few as three or four recipients respond. We develop
analytical models that calculate the expected number of
buffer overflows that can be used to estimate the
number of buffers necessary for an application. The
common cure for buffer overflow requires that
recipients delay their responses by some random amount
of time in order to increase the minimum spacing
between response messages, eliminate collisions on the
network, and decrease the peak processing demand at the
sender. In our table driven algorithm, the sender tries
to minimize the multicast's latency, the elapsed time
between its initial transmission of the multicast and
its reception of the final response, given the number
of times (rounds) it is willing to retransmit the
multicast. It includes in the multicast the time
interval over which it anticipates receiving the
response, the round timeout. We demonstrate that the
latency of single round multicasts exceeds the latency
of multiple round multicasts. We show how recipients
minimize the sender's buffer overflows by independently
choosing their response times as a function of the
round's timeout, sender's buffer size, and the number
of other recipients.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:1989:PDB,
author = "B. Mukherjee",
title = "Performance of a dual-bus unidirectional broadcast
network operating under probabilistic scheduling
strategy",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "118--126",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75385",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent advances in fiber optic technology (viz. its
promise to provide information-carrying capacity in the
Gbps range over long repeater-free distances) have
triggered tremendous activity in the study of
unidirectional bus networks (because signal flow in the
fiber is unidirectional). A popular network structure
that has received significant attention is the Dual-bus
Unidirectional Broadcast System (DUBS) network
topology. Most of the access mechanisms studied on this
structure are based on round-robin scheduling (or some
variation thereof). However since round-robin schemes
suffer a loss of channel capacity because of their
inter-round overhead (which can be significant for long
high-speed buses), a probabilistic scheduling strategy,
called pi-persistent protocol, has recently been
proposed and studied for single channel unidirectional
bus systems. Our concern here is to apply this
probabilistic scheduling strategy to each bus in DUBS,
and study the corresponding network performance. In so
doing, we allow stations to buffer multiple packets,
represent a station's queue size by a Markov chain
model, and employ an independence assumption. We find
that the average packet delay is bounded and the
maximum network throughput approaches two pkt/slot with
increasing buffer size. Further, the protocol's
performance is insensitive to bus characteristics, and
it appears to be particularly well suited for
fiber-optic network applications requiring long
distances and high bandwidth. Simulation results, which
verify the analytical model, are also included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1989:SCP,
author = "A. G. Greenberg and J. McKenna",
title = "Solution of closed, product form, queueing networks
via the {RECAL} and tree-{RECAL} methods on a shared
memory multiprocessor",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "127--135",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75386",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "RECAL is a new recurrence relation for calculating the
partition function and various queue length moments for
closed, product form networks. In this paper we discuss
a number of the issues involved in the software
implementation of RECAL on both sequential computers
and parallel, shared memory computers. After a brief
description of RECAL, we describe software implementing
RECAL on a sequential computer. In particular, we
discuss the problems involved in indexing and data
storage. Next we describe code implementing RECAL on a
parallel, shared memory computer. Special attention is
given to designing a special buffer for temporary data
storage and several other important features of the
parallel code. Finally, we touch on software for serial
and parallel implementations of a tree algorithm for
RECAL.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paterok:1989:FQP,
author = "M. Paterok and O. Fischer and L. Opta",
title = "Feedback queues with preemption-distance priorities",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "136--145",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75387",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The method of moments is used to derive exact
analytical solutions for an open priority queueing
system with preemption-distance priorities and
feedback. Customers enter from outside in a Poisson
stream. They can feed back for several times, changing
priorities and service demands in an arbitrary manner.
During feedback they can fork and branch according to
user-defined probabilities. The service demands of the
different classes are pairwise independent and can be
arbitrarily distributed. A customer who has been
interrupted resumes his service from the point where he
was interrupted (preemptive resume). A system of linear
equations is to be solved to obtain the mean sojourn
times of each customer class in the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wagner:1989:PSQ,
author = "D. B. Wagner and E. D. Lazowska",
title = "Parallel simulation of queueing networks: limitations
and potentials",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "146--155",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75388",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper concerns the parallel simulation of
queueing network models (QNMs) using the conservative
(Chandy--Misra) paradigm. Most empirical studies of
conservative parallel simulation have used QNMs as
benchmarks. For the most part, these studies concluded
that the conservative paradigm is unsuitable for
speeding up the simulation of QNMs, or that it is only
suitable for simulating a very limited subclass of
these models (e.g., those containing only FCFS
servers). In this paper we argue that these are
unnecessarily pessimistic conclusions. On the one hand,
we show that the structure of some QNMs inherently
limits the attainable simulation speedup. On the other
hand, we show that QNMs without such limitations can be
efficiently simulated using some recently introduced
implementation techniques. We present an analytic
method for determining an upper bound on speedup, and
use this method to identify QNM structures that will
exhibit poor simulation performance. We then survey a
number of promising implementation techniques, some of
which are quite general in nature and others of which
apply specifically to QNMs. We show how to extend the
latter to a larger class of service disciplines than
had been considered previously.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:1989:CCP,
author = "D. Mitra and I. Mitrani",
title = "Control and coordination policies for systems with
buffers",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "156--164",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75389",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study systems consisting of a number of service
cells in tandem, each containing a finite buffer.
Several policies governing the operation of such
systems are described and compared. These include
traditional and novel blocking schemes, with
applications to computer communications and production
lines. In particular, it is shown that kanban, a novel
discipline for coordinating cells in a manufacturing
context, is obtained by combining two, more basic,
concepts: a blocking policy introduced here as minimal
blocking, and shared buffers. The Kanban discipline is
superior in terms of throughput to the ordinary
transfer blocking policy. A method for analyzing
approximately the performance of the Kanban system is
also presented. This is based on examining first a
single cell in isolation and then combining the
isolated cells through fixed-point equations. Some
numerical results and comparisons with simulations are
included.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:1989:AMP,
author = "D. M. Nicol and J. C. Townsend",
title = "Accurate modeling of parallel scientific
computations",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "165--170",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75390",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scientific codes are usually parallelized by
partitioning a grid among processors. To achieve top
performance it is necessary to partition the grid so as
to balance workload and minimize
communication/synchronization costs. This problem is
particularly acute when the grid is irregular, changes
over the course of the computation, and is not known
until load-time. Critical mapping and remapping
decisions rest on our ability to accurately predict
performance, given a description of a grid and its
partition. This paper discusses one approach to this
problem, and illustrates its use on a one-dimensional
fluids code. The models we construct are shown
empirically to be accurate, and are used to find
optimal remapping schedules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sevcik:1989:CPA,
author = "K. C. Sevcik",
title = "Characterizations of parallelism in applications and
their use in scheduling",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "171--180",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75391",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As multiprocessors with large numbers of processors
become more prevalent, we face the task of developing
scheduling algorithms for the multiprogrammed use of
such machines. The scheduling decisions must take into
account the number of processors available, the overall
system load, and the ability of each application
awaiting activation to make use of a given number of
processors. The parallelism within an application can
be characterized at a number of different levels of
detail. At the highest level, it might be characterized
by a single parameter (such as the proportion of the
application that is sequential, or the average number
of processors the application would use if an unlimited
number of processors were available). At the lowest
level, representing all the parallelism in the
application requires the full data dependency graph
(which is more information than is practically
manageable). In this paper, we examine the quality of
processor allocation decisions under multiprogramming
that can be made with several different high-level
characterizations of application parallelism. We
demonstrate that decisions based on parallelism
characterizations with two to four parameters are
superior to those based on single-parameter
characterizations (such as fraction sequential or
average parallelism). The results are based
predominantly on simulation, with some guidance from a
simple analytic model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nelson:1989:ART,
author = "R. D. Nelson and T. K. Philips",
title = "An approximation to the response time for shortest
queue routing",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "181--189",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75392",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we derive an approximation for the mean
response time of a multiple queue system in which
shortest queue routing is used. We assume there are $K$
identical queues with infinite capacity and service
times that are exponentially distributed. Arrivals of
jobs to this system are Poisson and are routed to a
queue of minimal length. We develop an approximation
which is based on both theoretical and experimental
considerations and, for $ K \leq 8$, has a relative
error of less than one half of one percent when
compared to simulation. For $ K = 16$, the relative
error is still acceptable, being less than 2 percent.
An application to a model of parallel processing and a
comparison of static and dynamic load balancing schemes
are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raatikainen:1989:ART,
author = "K. E. E. Raatikainen",
title = "Approximating response time distributions",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "190--199",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75108.75393",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The response time is the most visible performance
index to users of computer systems. End-users see
individual response times, not the average. Therefore
the distribution of response times is important in
performance evaluation and capacity planning studies.
However, the analytic results cannot be obtained in
practical cases. A new method is proposed to
approximate the response-time distribution. Unlike the
previous methods the proposed one takes into account
the service-time distributions and routing behaviour.
The reported results indicate that the method provides
reasonable approximations in many cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:1989:CND,
author = "D. Mitra and A. Weiss",
title = "A closed network with a discriminatory
processor-sharing server",
journal = j-SIGMETRICS,
volume = "17",
number = "1",
pages = "200--208",
month = may,
year = "1989",
CODEN = "????",
DOI = "https://doi.org/10.1145/75372.75394",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:08:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper gives a simple, accurate first order
asymptotic analysis of the transient and steady state
behavior of a network which is closed, not product-form
and has multiple classes. One of the two nodes of the
network is an infinite server and the discipline in the
other node is discriminatory processor-sharing.
Specifically, if there are $ n_j $ jobs of class $j$ at
the latter node, then each class $j$ job receives a
fraction $ w_j / (\sum_i w_i n_i)$ of the processor
capacity. This work has applications to data networks.
For the asymptotic regime of high loading of the
processor and high processing capacity, we derive the
explicit first order transient behavior of the means of
queue lengths. We also give explicit expressions for
the steady state mean values and a simple procedure for
finding the time constants (eigenvalues) that govern
the approach to steady state. The results are based on
an extension of Kurtz's theorem concerning the fluid
limit of Markov processes. Some numerical experiments
show that the analysis is quite accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glew:1990:EII,
author = "Andy Glew",
title = "An empirical investigation of {OR} indexing",
journal = j-SIGMETRICS,
volume = "17",
number = "2",
pages = "41--49",
month = jan,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/378893.378896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers OR indexing as a substitute for,
or an optimization of, addition in an addressing mode
for a high speed processor. OR indexing is evaluated in
the context of existing address streams, using time
based sampling, and through compiler modifications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gunther:1990:PP,
author = "N. J. Gunther",
title = "Performance pathways",
journal = j-SIGMETRICS,
volume = "17",
number = "2",
pages = "50--56",
month = jan,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/378893.378898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We review the status of some recent results in the
performance analysis of computer systems which are
intrinsically unstable due to the presence of more than
one stable operating state. In particular, we consider
bistable computer systems which possess two stable
states: the typical operating point and another
stable point, concomitant with degraded system
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gonzales:1990:CHL,
author = "Michael G. Gonzales",
title = "Correction of the {Halstead} length estimator skew for
small {Pascal} programs",
journal = j-SIGMETRICS,
volume = "17",
number = "2",
pages = "57--59",
month = jan,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/378893.378899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of studies have confirmed the length
dependent skewness of Halstead's Software Science
length estimator. This paper examines the skewness for
small Pascal programs. A new model developed by
Nicholas Beser in 1983 corrects the length dependent
skew. The parameters for this model as applied to small
Pascal programs are obtained in the paper. Verification
of the correction of skewness, along with a comparison
of the variability of the two estimators, are also
examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1990:UPS,
author = "Albert G. Greenberg and Boris D. Lubachevsky and Isi
Mitrani",
title = "Unboundedly parallel simulations via recurrence
relations",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "1--12",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New methods are presented for parallel simulation of
discrete event systems that, when applicable, can
usefully employ a number of processors much larger than
the number of objects in the system being simulated.
Abandoning the distributed event list approach, the
simulation problem is posed using recurrence relations.
We bring three algorithmic ideas to bear on parallel
simulation: parallel prefix computation, parallel
merging, and iterative folding. Efficient parallel
simulations are given for (in turn) the G/G/1 queue, a
variety of queueing networks having a global first come
first served structure (e.g., a series of queues with
finite buffers), acyclic networks of queues, and
networks of queues with feedbacks and cycles. In
particular, the problem of simulating the arrival and
departure times for the first $N$ jobs to a single
G/G/1 queue is solved in time proportional to $ N / P +
\log P$ using $P$ processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nelson:1990:PEG,
author = "Randolph Nelson",
title = "A performance evaluation of a general parallel
processing model",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "13--26",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98495",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we analyze a model of a parallel
processing system. In our model there is a single queue
which is served by $ K \geq 1 $ identical processors. Jobs are
assumed to consist of a sequence of barrier
synchronizations where, at each step, the number of
tasks that must be synchronized is random with a known
distribution. An exact analysis of the model is
derived. The model leads to a rich set of results
characterizing the performance of parallel processing
systems. We show that the number of jobs concurrently
in execution, as well as the number of synchronization
variables, grows linearly with the load of the system
and strongly depends on the average number of parallel
tasks found in the workload. Properties of expected
response time of such systems are extensively analyzed
and, in particular, we report on some non-obvious
response time behavior that arises as a function of the
variance of parallelism found in the workload. Based on
exact response time analysis, we propose a simple
calculation that can be used as a rule of thumb to
predict speedups. This can be viewed as a
generalization of Amdahl's law that includes queueing
effects. This generalization is reformulated when
precise workloads cannot be characterized, but rather
when only the fraction of sequential work and the
average number of parallel tasks are assumed to be
known.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1990:ETD,
author = "Wen-Hann Wang and Jean-Loup Baer",
title = "Efficient trace-driven simulation method for cache
performance analysis",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "27--36",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose improvements to current trace-driven cache
simulation methods to make them faster and more
economical. We attack the large time and space demands
of cache simulation in two ways. First, we reduce the
program traces to the extent that exact performance can
still be obtained from the reduced traces. Second, we
devise an algorithm that can produce performance
results for a variety of metrics (hit ratio, write-back
counts, bus traffic) for a large number of
set-associative write-back caches in just a single
simulation run. The trace reduction and the efficient
simulation techniques are extended to parallel
multiprocessor cache simulations. Our simulation
results show that our approach substantially reduces
the disk space needed to store the program traces and
can dramatically speedup cache simulations and still
produce the exact results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eggers:1990:TEI,
author = "S. J. Eggers and David R. Keppel and Eric J. Koldinger
and Henry M. Levy",
title = "Techniques for efficient inline tracing on a
shared-memory multiprocessor",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "37--47",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While much current research concerns multiprocessor
design, few traces of parallel programs are available
for analyzing the effect of design trade-offs. Existing
trace collection methods have serious drawbacks:
trap-driven methods often slow down program execution
by more than 1000 times, significantly perturbing
program behavior; microcode modification is faster, but
the technique is neither general nor portable. This
paper describes a new tool, called MPTRACE, for
collecting traces of multithreaded parallel programs
executing on shared-memory multiprocessors. MPTRACE
requires no hardware or microcode modification; it
collects complete program traces; it is portable; and
it reduces execution-time dilation to less than a
factor 3. MPTRACE is based on inline tracing, in which
a program is automatically modified to produce trace
information as it executes. We show how the use of
compiler flow analysis techniques can reduce the amount
of data collected and therefore the runtime dilation of
the traced program. We also discuss problematic issues
concerning buffering and writing of trace data on a
multiprocessor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Agarwal:1990:BES,
author = "Anant Agarwal and Minor Huffman",
title = "Blocking: exploiting spatial locality for trace
compaction",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "48--57",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98503",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trace-driven simulation is a popular method of
estimating the performance of cache memories,
translation lookaside buffers, and paging schemes.
Because the cost of trace-driven simulation is directly
proportional to trace length, reducing the number of
references in the trace significantly impacts
simulation time. This paper concentrates on trace
driven simulation for cache miss rate analysis.
Previous schemes, such as cache filtering, exploited
temporal locality for compressing traces and could
yield an order of magnitude reduction in trace length.
A technique called blocking and a variant called
blocking with temporal data are presented that compress
traces by exploiting spatial locality. Experimental
results show that blocking filtering combined with
cache filtering can reduce trace length by nearly two
orders of magnitude while introducing about 10\% error
in cache miss rate estimates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:1990:BAF,
author = "Tein-Hsiang Lin and Kang G. Shin",
title = "A {Bayesian} approach to fault classification",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "58--66",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98505",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "According to their temporal behavior, faults in
computer systems are classified into permanent,
intermittent, and transient faults. Since it is
impossible to identify the type of a fault upon its
first detection, the common practice is to retry the
failed instruction one or more times and then use other
fault recovery methods, such as rollback or restart, if
the retry is not successful. To determine an
``optimal'' (in some sense) number of retries, we need
to know several fault parameters, which can be
estimated only after classifying all the faults
detected in the past. In this paper we propose a new
fault classification scheme which assigns a fault type
to each detected fault based on its detection time, the
outcome of retry, and its detection symptom. This
classification procedure utilizes the Bayesian decision
theory to sequentially update the estimation of fault
parameters whenever a detected fault is classified. An
important advantage of this classification is the early
identification of presence of an intermittent fault so
that appropriate measures can be taken before it causes
a serious damage to the system. To assess the goodness
of the proposed scheme, the probability of incorrect
classification is also analyzed and compared with
simulation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moser:1990:PLA,
author = "Louise E. Moser and Vikas Kapur and P. M.
Melliar-Smith",
title = "Probabilistic language analysis of weighted voting
algorithms",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "67--73",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98507",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a method of analyzing the performance of
weighted voting algorithms in a fault-tolerant
distributed system. In many distributed systems, some
processors send messages more frequently than others
and all processors share a common communication medium,
such as an Ethernet. Typical fault-tolerant voting
algorithms require that a certain minimum number of
votes be collected from different processors. System
performance is significantly affected by the time
required to collect those votes. We formulate the
problem of weighted voting in terms of probabilistic
languages and then use the calculus of generating
functions to compute the expected delay to collect that
number of votes. An application of the method to a
particular voting algorithm, the Total protocol, is
given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1990:ERA,
author = "Peter M. Chen and Garth A. Gibson and Randy H. Katz
and David A. Patterson",
title = "An evaluation of redundant arrays of disks using an
{Amdahl 5890}",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "74--85",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently we presented several disk array architectures
designed to increase the data rate and I/O rate of
supercomputing applications, transaction processing,
and file systems [Patterson 88]. In this paper we
present a hardware performance measurement of two of
these architectures, mirroring and rotated parity. We
see how throughput for these two architectures is
affected by response time requirements, request sizes,
and read to write ratios. We find that for applications
with large accesses, such as many supercomputing
applications, a rotated parity disk array far
outperforms traditional mirroring architecture. For
applications dominated by small accesses, such as
transaction processing, mirroring architectures have
higher performance per disk than rotated parity
architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:1990:SAF,
author = "Amarnath Mukherjee and Lawrence H. Landweber and John
C. Strikwerda",
title = "Simultaneous analysis of flow and error control
strategies with congestion-dependent errors",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "86--95",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:1990:QAA,
author = "Arthur Y. M. Lin and John A. Silvester",
title = "Queueing analysis of an {ATM} switch with multichannel
transmission groups",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "96--105",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98514",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The discrete-time D[A]/D/c/B queueing system is
studied. We consider both a bulk arrival process with
constant bulk inter-arrival time ($D$) and general
bulk-size distribution ($A$) and a periodic arrival
process ($ D_1 + \cdots + D_N$). The
service/transmission times are deterministic ($D$) and
the system provides for a maximum of $c$ servers with a
buffer size $B$. The motivation for studying this
queueing system is its application in performance
modeling and analysis of an asynchronous transfer mode
(ATM) switch with multichannel transmission groups.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Johnson:1990:AAR,
author = "Theodore Johnson",
title = "Approximate analysis of reader and writer access to a
shared resource",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "106--114",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98517",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a queue that has two classes
of customers: readers and writers. Readers access the
resource concurrently and writers access the resource
serially. The queue discipline is FCFS: readers must
wait until all writers that arrived earlier have
completed service, and vice versa. The approximation
can predict both the expected waiting times for readers
and writers and the capacity of the queue. The queue
can be used for the analysis of operating system and
software resources that can be accessed both serially
and concurrently, such as shared files. We have used
the queue to analyze the performance of concurrent
B-tree algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anderson:1990:QTT,
author = "Thomas E. Anderson and Edward D. Lazowska",
title = "{Quartz}: a tool for tuning parallel program
performance",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "115--125",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98518",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Initial implementations of parallel programs typically
yield disappointing performance. Tuning to improve
performance is thus a significant part of the parallel
programming process. The effort required to tune a
parallel program, and the level of performance that
eventually is achieved, both depend heavily on the
quality of the instrumentation that is available to the
programmer. This paper describes Quartz, a new tool for
tuning parallel program performance on shared memory
multiprocessors. The philosophy underlying Quartz was
inspired by that of the sequential UNIX tool gprof: to
appropriately direct the attention of the programmer by
efficiently measuring just those factors that are most
responsible for performance and by relating these
metrics to one another and to the structure of the
program. This philosophy is even more important in the
parallel domain than in the sequential domain, because
of the dramatically greater number of possible metrics
and the dramatically increased complexity of program
structures. The principal metric of Quartz is
normalized processor time: the total processor time
spent in each section of code divided by the number of
other processors that are concurrently busy when that
section of code is being executed. Tied to the logical
structure of the program, this metric provides a
``smoking gun'' pointing towards those areas of the
program most responsible for poor performance. This
information can be acquired efficiently by
checkpointing to memory the number of busy processors
and the state of each processor, and then statistically
sampling these using a dedicated processor. In addition
to describing the design rationale, functionality, and
implementation of Quartz, the paper examines how Quartz
would be used to solve a number of performance problems
that have been reported as being frequently
encountered, and describes a case study in which Quartz
was used to significantly improve the performance of a
CAD circuit verifier.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pattipati:1990:CVA,
author = "Krishna R. Pattipati and Joel Wolf and Somnath Deb",
title = "A calculus of variations approach to file allocation
problems in computer systems",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "126--133",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98522",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is concerned with the parameter
optimization in closed product-form queueing networks.
Our approach is to combine the techniques of the
calculus of variations with the mean value analysis
(MVA) recursion of closed queueing networks. We view
the MVA recursion as nonlinear difference equations
describing a multi-stage system, wherein a stage
corresponds to the network population, and the response
times at each node constitute the state variables of
the multi-stage system. This viewpoint leads to a
two-point boundary value problem, in which the forward
system corresponds to the MVA recursion and the
backward system corresponds to an MVA-like adjoint
recursion. The method allows for a very general class
of objective functions, and the adjoint equations
provide the necessary information to compute the
gradient of the cost function. The optimization problem
can then be solved by any of the gradient-based
methods. For the special case when the objective
function is the network delay function, the gradient
vector is shown to be related to the moments of the
queue lengths. In addition, the adjoint vector offers
the potential for the on-line adaptive control of
queueing networks based on the state information (e.g.,
actual degree of multi-programming, response times at
the devices.) The theory is illustrated via application
to the problem of determining the optimal disk routing
probabilities in a large scale, modern I/O
(Input/Output) subsystem. A subsequent paper will deal
with extensions of the theory to multi-class
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Robinson:1990:DCM,
author = "John T. Robinson and Murthy V. Devarakonda",
title = "Data cache management using frequency-based
replacement",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "134--142",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new frequency-based replacement algorithm
for managing caches used for disk blocks by a file
system, database management system, or disk control
unit, which we refer to here as data caches.
Previously, LRU replacement has usually been used for
such caches. We describe a replacement algorithm based
on the concept of maintaining reference counts in which
locality has been ``factored out''. In this algorithm
replacement choices are made using a combination of
reference frequency and block age. Simulation results
based on traces of file system and I/O activity from
actual systems show that this algorithm can offer up to
34\% performance improvement over LRU replacement,
where the improvement is expressed as the fraction of
the performance gain achieved between LRU replacement
and the theoretically optimal policy in which the
reference string must be known in advance. Furthermore,
the implementation complexity and efficiency of this
algorithm is comparable to one using LRU replacement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dan:1990:AAL,
author = "Asit Dan and Don Towsley",
title = "An approximate analysis of the {LRU} and {FIFO} buffer
replacement schemes",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "143--152",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98525",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we develop approximate analytical
models for predicting the buffer hit probability under
the Least Recently Used (LRU) and First In First Out
(FIFO) buffer replacement policies under the
independent reference model. In the case of the
analysis of the LRU policy, the computational
complexity for estimating the buffer hit probability is
$ O(K B) $ where $B$ is the size of the buffer and $K$
denotes the number of items having distinct access
probabilities. In the case of the FIFO policy, the
solution algorithm is iterative and the computational
complexity of each iteration is $ O(K)$. Results from
these models are compared to exact results for models
originally developed by King [KING71] for small values
of the buffer size, $B$, and the total number of items
sharing the buffer, $D$. Results are also compared with
results from a simulation for large values of $B$ and
$D$. In most cases, the error is extremely small (less
than 0.1\%) for both LRU and FIFO, and a maximum error
of 3\% is observed for very small buffer size (less
than 5) when the access probabilities are extremely
skewed. To demonstrate the usefulness of the model, we
consider two applications. In our first application, we
compare the LRU and FIFO policies to an optimal static
buffer allocation policy for a database consisting of
two classes of data items. We observe that the
performance of LRU is close to that of the optimal
allocation. As the optimal allocation requires
knowledge of the access probabilities, the LRU policy
is preferred when this information is unavailable. We
also observe that the LRU policy always performs better
than the FIFO policy in our experiments. In our second
application, we show that if multiple independent
reference streams on mutually disjoint sets of data
compete for the same buffer, it is better to partition
the buffer using an optimal allocation policy than to
share a common buffer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alonso:1990:AFW,
author = "Raphael Alonso and Andrew W. Appel",
title = "An advisor for flexible working sets",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "153--162",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98753",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The traditional model of virtual memory working sets
does not account for programs that can adjust their
working sets on demand. Examples of such programs are
garbage-collected systems and databases with block
cache buffers. We present a memory-use model of such
systems, and propose a method that may be used by
virtual memory managers to advise programs on how to
adjust their working sets. Our method tries to minimize
memory contention and ensure better overall system
response time. We have implemented a memory ``advice
server'' that runs as a non-privileged process under
Berkeley Unix. User processes may ask this server for
advice about working set sizes, so as to take maximum
advantage of memory resources. Our implementation is
quite simple, and has negligible overhead, and
experimental results show that it results in sizable
performance improvements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Torrellas:1990:ACA,
author = "Joseph Torrellas and John Hennessy and Thierry Weil",
title = "Analysis of critical architectural and programming
parameters in a hierarchical shared-memory
multiprocessor",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "163--172",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98754",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalable shared-memory multiprocessors are the subject
of much current research, but little is known about the
performance behavior of these machines. This paper
studies the performance effects of two machine
characteristics and two program characteristics that
seem to be major factors in determining the performance
of a hierarchical shared-memory machine. We develop an
analytical model of the traffic in a machine loosely
based on Stanford's DASH multiprocessor and use program
parameters extracted from multiprocessor traces to
study its performance. It is shown that both locality
in the data reference stream and the amount of data
sharing in a program have an important impact on
performance. Although less obvious, the bandwidth
within each cluster in the hierarchy also has a
significant performance effect. Optimizations that
improve the intracluster cache coherence protocol or
increase the bandwidth within a cluster can be quite
effective.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jog:1990:PEC,
author = "Rajeev Jog and Philip L. Vitale and James R.
Callister",
title = "Performance evaluation of a commercial cache-coherent
shared memory multiprocessor",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "173--182",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98756",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an approximate Mean Value
Analysis (MVA) model developed to project the
performance of a small-scale shared-memory commercial
symmetric multiprocessor system. The system, based on
Hewlett Packard Precision Architecture processors,
supports multiple active user processes and multiple
execution threads within the operating system. Using
detailed timing for hardware delays, a customized
approximate closed queueing model is developed for the
multiprocessor system. The model evaluates delays due
to bus and memory contention, and cache interference.
It predicts bus bandwidth requirements and utilizations
for the bus and memory controllers. An extension to
handle I/O traffic is outlined. Applications are
profiled on the basis of execution traces on
uniprocessor systems to provide input parameters for
the model. Performance effects of various detailed
architectural tradeoffs (memory interleaving, lower
memory latencies) are examined. The sensitivity of
overall system performance to various parameters is
explored. Preliminary measurements of uniprocessor
systems are compared against the model predictions. A
prototype multiprocessor system is under development.
We intend to validate the modeling results against
measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1990:PAC,
author = "Erol Gelenbe",
title = "Performance analysis of the {Connection Machine}",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "183--191",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98757",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an analysis of the performance of
the Connection Machine, with special emphasis on
estimating the effect of its interprocessor
communication architecture. A queueing model of the
network architecture, including the NEWS and ROUTER
networks, is used to compute the slow-down induced by
message exchange between processors. Locality of the
message exchanges is modelled by message sending
probabilities which depend on whether a message is sent
by a processor to another processor placed on the same
NEWS network, or on the same ROUTER, or at a ``remote''
location which is only accessible via the ROUTER
network. The specific slotted TDMA structure of the
ROUTER Network communications is taken into account.
The performance degradation of the Connection Machine
as a function of the communication and architecture
parameters is derived.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Willick:1990:AMM,
author = "Darryl L. Willick and D. L. Eager",
title = "An analytic model of multistage interconnection
networks",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "192--202",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98758",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiprocessors require an interconnection network to
connect processors with memory modules. The performance
of the interconnection network can have a large effect
upon overall system performance, and, therefore,
methods are needed to model and compare alternative
network architectures. This paper is concerned with
evaluating the performance of multistage
interconnection networks consisting of $ k \times s $
switching elements. Examples of such networks include
omega, binary $n$-cube and baseline networks. We
consider clocked, packet switched networks with buffers
at switch output ports. An analytical model based on
approximate Mean Value Analysis is developed, then
validated through simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dussa:1990:DPT,
author = "K. Dussa and B. Carlson and L. Dowdy and K.-H. Park",
title = "Dynamic partitioning in a transputer environment",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "203--213",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98759",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel programs are characterized by their speedup
behavior. As more processors are allocated to a
particular parallel program, the program (potentially)
executes faster. However, there is often a point of
diminishing returns, beyond which extra allocated
processors cannot be used effectively. Extra processors
would be better utilized by allocating them to another
program. Thus, given a set of processors in a
multiprocessor system, and a set of parallel programs,
a partitioning problem naturally arises which seeks to
allocate processors to programs optimally. The problem
addressed in this paper is dynamic partitioning. When
the number of executable parallel programs changes, the
optimal partition sizes also change. To realize the new
partition settings, a dynamic repartitioning of all
processors is triggered. When extra processors suddenly
become available to a running program due to a program
departure, or when processors suddenly are taken away
from a running program due to a program arrival, a
nontrivial repartitioning overhead occurs. Depending
upon the specific environment, this overhead cost may
negate any potential repartitioning benefit. To gain
insight into this dynamic partitioning problem, a
specific system, a specific workload, and a specific
analytical model are studied. The specific system is an
INMOS transputer system consisting of an HP Vectra
front-end, an INMOS B004 evaluation board with a single
T414 transputer, and an EB8-10 board with eight T800
transputers. The specific workload consists of parallel
versions of a classical N-body problem and a classical
search problem. The specific analytical model is a
Markov model which is parameterized using the concept
of program execution signatures. The sensitivity
analysis experiments both validate the model and
indicate the characteristics of those workloads which
benefit from dynamic partitioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@article{Zahorjan:1990:PSS,
  author =       {John Zahorjan and Cathy McCann},
  title =        {Processor scheduling in shared memory multiprocessors},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {214--225},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98457.98760},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract =     {Existing work indicates that the commonly used
    ``single queue of runnable tasks'' approach to
    scheduling shared memory multiprocessors can perform
    very poorly in a multiprogrammed parallel processing
    environment. A more promising approach is the class of
    ``two-level schedulers'' in which the operating system
    deals solely with allocating processors to jobs while
    the individual jobs themselves perform task dispatching
    on those processors. In this paper we compare two basic
    varieties of two-level schedulers. Those of the first
    type, static, make a single decision per job regarding
    the number of processors to allocate to it. Once the
    job has received its allocation, it is guaranteed to
    have exactly that number of processors available to it
    whenever it is active. The other class of two-level
    scheduler, dynamic, allows each job to acquire and
    release processors during its execution. By responding
    to the varying parallelism of the jobs, the dynamic
    scheduler promises higher processor utilizations at the
    cost of potentially greater scheduling overhead and
    more complicated application level task control
    policies. Our results, obtained via simulation,
    highlight the tradeoffs between the static and dynamic
    approaches. We investigate how the choice of policy is
    affected by the cost of switching a processor from one
    job to another. We show that for a wide range of
    plausible overhead values, dynamic scheduling is
    superior to static scheduling. Within the class of
    static schedulers, we show that, in most cases, a
    simple ``run to completion'' scheme is preferable to a
    round-robin approach. Finally, we investigate different
    techniques for tuning the allocation decisions required
    by the dynamic policies and quantify their effects on
    performance. We believe our results are directly
    applicable to many existing shared memory parallel
    computers, which for the most part currently employ a
    simple ``single queue of tasks'' extension of basic
    sequential machine schedulers. We plan to validate our
    results in future work through implementation and
    experimentation on such a system.},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Leutenegger:1990:PMM,
  author =       {Scott T. Leutenegger and Mary K. Vernon},
  title =        {The performance of multiprogrammed multiprocessor
    scheduling algorithms},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {226--236},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98457.98761},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract =     {Scheduling policies for general purpose
    multiprogrammed multiprocessors are not well
    understood. This paper examines various policies to
    determine which properties of a scheduling policy are
    the most significant determinants of performance. We
    compare a more comprehensive set of policies than
    previous work, including one important scheduling
    policy that has not previously been examined. We also
    compare the policies under workloads that we feel are
    more realistic than previous studies have used. Using
    these new workloads, we arrive at different conclusions
    than reported in earlier work. In particular, we find
    that the ``smallest number of processes first'' (SNPF)
    scheduling discipline performs poorly, even when the
    number of processes in a job is positively correlated
    with the total service demand of the job. We also find
    that policies that allocate an equal fraction of the
    processing power to each job in the system perform
    better, on the whole, than policies that allocate
    processing power unequally. Finally, we find that for
    lock access synchronization, dividing processing power
    equally among all jobs in the system is a more
    effective property of a scheduling policy than the
    property of minimizing synchronization spin-waiting,
    unless demand for synchronization is extremely high.
    (The latter property is implemented by coscheduling
    processes within a job, or by using a thread management
    package that avoids preemption of processes that hold
    spinlocks.) Our studies are done by simulating abstract
    models of the system and the workloads.},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Dawkins:1990:ESM,
  author =       {W. P. Dawkins and V. Debbad and J. R. Jump and J. B.
    Sinclair},
  title =        {Efficient simulation of multiprogramming},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {237--238},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98460.98762},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Shenker:1990:MFC,
  author =       {Scott Shenker},
  title =        {Making flow control work in networks: a
    control-theoretic analysis of gateway service disciplines},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {239--240},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98457.98763},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Shenker:1990:MGW,
  author =       {Scott Shenker},
  title =        {Making greed work in networks: a game-theoretic
    analysis of gateway service disciplines},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {241--242},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98457.98764},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@Article{Ghandeharizadeh:1990:FAP,
author = "Shahram Ghandeharizadeh and David J. DeWitt",
title = "Factors affecting the performance of multiuser
database management systems",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "243--244",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98765",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While in the past 20 years database management systems
(DBMS) have become a critical component of almost all
organizations, their behavior in a multiuser
environment has surprisingly not been studied
carefully. In order to help us understand the multiuser
performance of the multiprocessor Gamma database
machine [DEWI90], we began by studying the performance
of a single processor version of this system. In this
paper, we describe some of the factors that affect the
performance of DBMS in a multiuser environment. We
refer the interested reader to [GHAN90] for more
details. For these experiments, the Gamma software was
run on a VAX 11/750 with 2 megabytes of memory and a
330 megabyte Fujitsu disk drive. An 8 Kbyte disk page
was used and the buffer pool was set at 61 pages. A
second processor was used to simulate users submitting
queries. In a DBMS, queries can be classified according
to their pattern of disk accesses. Those that either
sequentially scan all the pages of a relation or use a
clustered index to retrieve only those pages containing
tuples that satisfy a selection predicate, access the
disk sequentially. Queries that use a non-clustered
index to process a query tend to access disk pages
randomly. For those queries that access the disk
sequentially, it is very important to avoid random disk
accesses in presence of multiple, concurrently
executing queries. Consider a query that selects 1
tuple from a 12,500 tuple relation (each tuple is 208
bytes long) by sequentially scanning the relation. As
shown in Figure 1, as the multiprogramming level (MPL)
is increased from 1 to 2, the throughput of the system
actually decreases. In the case of a high degree of
data sharing, the two concurrently executing queries
will generally access the same relation (out of a set
of 10 identical relations). However, this does not
necessarily mean that these queries are sufficiently
synchronized to share pages in the buffer pool. The
result is that the disk ends up performing a series of
random disk requests instead of a series of sequential
disk requests had each query been submitted
consecutively. The random disk requests result in a
higher average seek time. As shown in Figure 1, the
drop in throughput is largest for the low degree of
data sharing as the two concurrently executing queries
may access any relation in the database. Thus, on the
average the head of the disk must travel a longer
distance on each disk access and since the average seek
time increases as a function of the square root of the
distance traveled by the head of the disk, the average
service time of the disk is higher. To further
illustrate the complex behavior that a database system
can exhibit, consider a range selection query that uses
a non-clustered index to select 15 tuples out of a
12,500 tuple relation. Since with a non-clustered index
the order of index records is not the same as the order
of the tuples in the indexed relation, each tuple
retrieved results in a random disk I/O. As shown in
Figure 2, the throughput of the system is highest for
the high degree of data sharing because when a query
commits and its corresponding terminal submits a new
query, the new query will generally access the same
relation as the previous query. The result is that the
required index pages will generally be resident in the
buffer pool. On the other hand, the probability that
the newly submitted query will access the same relation
as the previous query is much lower with the low and
medium degrees of data sharing than with the high
degree of data sharing. Furthermore, since each query
processes a large number of pages, the execution of one
query tends to flush the buffer pool of pages from some
previously accessed relation resulting in a very low
percentage of buffer pool hits for subsequent queries
as illustrated in Figure 3. For each of the degrees of
data sharing, the throughput of the system increases
from a multiprogramming level (MPL) of one to twelve.
But observe from Figure 4 that the disk becomes 100\%
utilized at a MPL of four. The reason that the
throughput continues to increase from a MPL of 4 to 12
is because the disk controller utilizes an elevator
algorithm when more than two disk requests are pending
and consequently enforces some locality of reference on
the random disk accesses. The result is that the
average seek time decreases. At MPLs higher than
twelve, the throughput of the system begins to decrease
for each of the degrees of data sharing due to the
decrease in percentage of buffer pool hits (see Figure
3). Recall that all the disk requests made by this
query type are random and that the buffer pool utilizes
an LRU replacement policy for all the pages (index +
data). At MPLs higher than twelve, the data pages begin
to compete with index pages for the buffer pool
resulting in a decrease in the percentage of buffer
pool hits. In addition, this increases the load on the
disk and reduces the load on the CPU resulting in a
drop in CPU as shown in Figure 5. Other factors that
affect the performance of a DBMS include the use of a
software read-ahead mechanism and the availability of a
hardware disk cache. We have observed up to a 30\%
improvement in throughput with a software read-ahead
mechanism. Its benefits, however, diminish when the
disk becomes 100\% utilized. While a track-size
hardware disk cache is extremely beneficial for
sequential scan queries executing by themselves, such a
mechanism provides only very marginal benefits in a
multiuser environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@article{Englert:1990:BNS,
  author =       {Susanne Englert and Jim Gray and Terrye Kocher and
    Praful Shah},
  title =        {A benchmark of {NonStop SQL release 2} demonstrating
    near-linear speedup and scaleup on large databases},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {245--246},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98460.98766},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Somani:1990:PMR,
  author =       {Arun K. Somani and James A. Ritcey and Stephen H. L.
    Au},
  title =        {Phased mission reliability analysis},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {247--248},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98460.98768},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Mitchell:1990:PAF,
  author =       {Lionel C. Mitchell},
  title =        {Performance analysis of a fault tolerant computer
    system},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {249--250},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98460.98769},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract =     {This paper presents the description of an analytical
    queueing network model of a Tandem computer system in
    the FAA Remote Maintenance Monitoring environment and a
    performance analysis of the Maintenance Processor
    Subsystem for the 1990s time frame. The approach was to
    use measurement data to quantify application service
    demands and performance contributions of the
    fault-tolerant software in the Tandem environment in an
    analytical queueing network model. Sensitivity analyses
    were conducted using the model to examine alternative
    configurations, workload growth, and system overhead
    among others. The model framework and performance
    analysis methodology can be used for capacity planning
    purposes during the operational phase of the system.
    The Federal Aviation Administration (FAA) is
    responsible for the many critical functions of the
    National Airspace System (NAS). Many of these functions
    have very high availability requirements. One such
    function is Remote Maintenance Monitoring (RMM). The
    FAA has implemented prototype versions of portions of
    this system on the Tandem fault tolerant computer
    architecture. The Maintenance Processor Subsystem (MPS)
    implements monitor/control and management information
    functions within FAA's Remote Maintenance Monitoring
    System (RMMS). MPSs are located at 23 Air Route Traffic
    Control Centers (ARTCC) and various other FAA sites.
    These computers remotely monitor and control sensors.
    The RMMS components are in various stages of
    development. The MPS currently consists of a
    multi-processor Tandem configuration with initial
    versions of the monitor/control and management
    information software. Only a small number of remote
    sensors are currently monitored via point-to-point
    communication links. The performance evaluation of the
    FAA's MPS involved the following steps: assess the
    functional and performance requirements; develop and
    validate a baseline model of the MPS prototype Tandem
    system; modify the baseline model to represent future
    MPS configuration and transaction requirements; and
    evaluate predicted performance. The functional and
    performance requirements of the MPS were determined
    primarily from FAA documentation and personnel.
    Performance data from a prototype MPS site at the
    Memphis ARTCC, collected by the Tandem XRAY monitor,
    were used to quantify model priority, service demand
    and workload intensity parameters, and to validate the
    baseline model using response time and utilization
    metrics. Configuration specification on the Memphis
    node was also collected for the use in the model. The
    model was developed using the CTA queueing network
    package Performance Analysis Tool Box (PATB). The model
    of the Tandem computer represents the non-stop
    processing operation implemented by Tandem's
    Transaction Monitoring Facility (TMF) and the mirrored
    disk writing operation. In addition, the model
    represents the GUARDIAN operating system priority
    scheduler, CPU burst size, interrupt processing, and
    memory swapping. The basic modeling approach was to use
    measurement data to represent the complex fault
    tolerant activities in an analytical queueing network
    model. A model of Memphis MPS node was developed to
    serve as a baseline for examining the performance of
    future ARTCC MPS configurations. The model was
    developed using the PATB queueing network tool which
    implements a Linearizer mean value analysis algorithm.
    The MPS functional and performance requirements and the
    XRAY measurement data were used to define the software,
    communication, and workload characteristics of the
    model. The XRAY measurement data and configuration
    information on the Memphis MPS node and Tandem
    information were used to define the hardware and system
    software characteristics and to quantify the processing
    and I/O service demands for the application and system
    software. The basic components of the PATB model are:
    CPU, disk, and communication link hardware components;
    the application and system software program elements
    including the fault tolerant functions; and the
    application and overhead workload, or transaction,
    flows. The local terminals were implicitly represented
    as the source of the transactions. The Remote
    Monitoring Subsystem (RMS) sensor devices were
    represented as transaction sources and sinks. The
    interprocessor bus, the device controllers and the I/O
    bus were not included in the model. Their contribution
    to performance was judged to be insignificant based on
    examination of measurement data. The fault-tolerant
    check-point functionality of Tandem's Transaction
    Monitoring Facility was represented by including the
    TMF processing and I/O activities as serial delays on
    the transaction flows for application workloads. The
    mirrored disk writing was reflected in the I/O service
    demand data from XRAY and did not require any further
    model representation. Memory contention was modeled in
    a separate PATB model. Both models assume a normal
    operational scenario (i.e., failure modes are not
    modeled). The baseline performance model was validated
    using the XRAY data from the Memphis MPS site. The
    primary performance metric used in the model validation
    was average terminal response time. Model response time
    was within 15 percent of measured response time. One
    parameter examined in the validation exercise was CPU
    burst size. Using average burst size instead of the
    operating system maximum provided better agreement of
    model results with measured results. The MPS baseline
    model was modified to represent different possible MPS
    configurations for the 1990s. The changes in the model
    reflected additional and faster CPU, disk and
    communication servers and modification of software CPU
    residency and workload flows. Various alternatives were
    examined for hardware and software configuration,
    number of sensor devices monitored, terminal
    transaction load, and system overhead and application
    software service demands. In addition to the detailed
    model of the application and system software a
    flow-equivalent queueing network model was developed,
    using PATB, to examine the impact of memory queueing
    for the proposed configuration. The model was developed
    to examine the impact of: the operating system policy
    of ``cloning'' processes subject to queue length
    threshold; additional application software functions
    not yet implemented; uncertainty of expected
    transaction rate; and additional system software
    storage requirements. The results of the analysis are
    being used by the FAA to define the MPS performance
    requirements for the 1995 time frame. The MPS model may
    be used in the future for capacity planning and
    performance optimization exercises for different MPS
    field configurations.},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Jensen:1990:RTD,
  author =       {David W. Jensen and Daniel A. Reed},
  title =        {Ray tracing on distributed memory parallel systems},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {251--252},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98460.98770},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract =     {Among the many techniques in computer graphics, ray
    tracing is prized because it can render realistic
    images, albeit at great computational expense. In this
    note we explore the performance of several approaches
    to ray tracing on a distributed memory parallel system.
    A set of performance instrumentation tools and their
    associated visualization software are used to identify
    the underlying causes of performance differences.},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Mirchandani:1990:CME,
  author =       {Dinesh Mirchandani and Prabuddha Biswas},
  title =        {Characterizing and modeling {Ethernet} performance of
    distributed {DECwindows} applications},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {253--254},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98460.98771},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{McGehearty:1990:COPa,
  author =       {Patrick F. McGehearty},
  title =        {Challenges in obtaining peak parallel performance with
    a {Convex C240}, parallel vector processor},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {255--256},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98457.98773},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract =     {This report examines the behavior of the Linpack $ 300
    \times 300 $ benchmark [Dongarra] on a parallel vector
    machine. It is observed that the performance of several
    parallel vector machines on this application is far
    below their nominal peak performance. Dissection of the
    internals of the algorithms shows how peak performance
    is limited. The insights gained provide guidance to
    algorithm developers as to ways to make maximum use of
    architectural strengths. System architects may gain
    insight about which system characteristics to optimize
    to increase the performance of future designs for this
    class of application.},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@article{Heimlich:1990:TCN,
  author =       {Steven A. Heimlich},
  title =        {Traffic characterization of the {NSFNET} national
    backbone},
  journal =      j-SIGMETRICS,
  volume =       {18},
  number =       {1},
  pages =        {257--258},
  month =        may,
  year =         {1990},
  CODEN =        {????},
  DOI =          {https://doi.org/10.1145/98460.98774},
  ISSN =         {0163-5999 (print), 1557-9484 (electronic)},
  bibdate =      {Thu Jun 26 11:09:08 MDT 2008},
  bibsource =    {https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib},
  abstract =     {Traditionally, models of packet arrival in
    communication networks have assumed either Poisson or
    compound Poisson arrival patterns. A study of a token
    ring local area network (LAN) at MIT [5] found that
    packet arrival followed neither of these models.
    Instead, traffic followed a more general model dubbed
    the ``packet train,'' which describes network traffic
    as a collection of packet streams traveling between
    pairs of nodes. A packet train consists of a number of
    packets traveling between a particular node pair. This
    study examines the existence of packet trains on
    NSFNET, a high speed national backbone network. Train
    characteristics on NSFNET are not as striking as those
    found on the MIT local network; however, certain
    protocols exhibit quite strong train behavior given the
    great number of hosts communicating through the
    backbone. Descriptions of the packet train model can be
    found in [3] and [5].},
  acknowledgement = ack-nhfb,
  ajournal =     {Perform. Eval. Rev.},
  fjournal =     {ACM SIGMETRICS Performance Evaluation Review},
  journal-URL =  {https://dl.acm.org/loi/sigmetrics},
}
@Article{Davidson:1990:EEA,
author = "Jack W. Davidson and David B. Whalley",
title = "{Ease}: an environment for architecture study and
experimentation",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "259--260",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98775",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gathering detailed measurements of the execution
behavior of an instruction set architecture is
difficult. There are two major problems that must be
solved. First, for meaningful measurements to be
obtained, programs that represent typical work load and
instruction mixes must be used. This means that
high-level language compilers for the target
architecture are required. This problem is further
compounded as most architectures require an optimizing
compiler to exploit their capabilities. Building such a
compiler can be a formidable task. The second problem
is that gathering detailed dynamic measurements of an
architecture using typical user programs reading
typical data sets can consume significant computation
resources. For example, a popular way to gather
execution measurements is to simulate the architecture.
This technique is often used when the architecture in
question does not yet exist, or is not yet stable and
available for production use. Depending on the level of
the simulation, programs can run 100 to 500 times
slower than directly-executed code [HUGU87]. Tracing is
another alternative one can use if the architecture
being measured exists, is accessible, and tracing is
possible on that machine. Tracing can be even slower
than simulation [HUGU87]. Because of the large
performance penalties with these methods, the tendency
is to use small programs with small data sets. The
relevance of measures collected this way is always
subject to question. This paper describes an
environment called ease (Environment for Architecture
Study and Experimentation) that solves both these
problems. It consists of an easily retargetable
optimizing compiler that produces production-quality
code. The compiler also supports the generation of
instrumented code that gathers very fine-grained
execution statistics with little overhead. Typically,
instrumented code runs 10 to 15 percent slower than
code that is not instrumented. Similarly, because
information about instructions are collected as a side
effect of the compiler generating code, compilation
time is only increased by 15 to 20 percent. The
combination of an easily retargetable compiler and an
efficient method of observing the run-time behavior of
real programs provides an environment that is useful in
a number of contexts. ease logically consists of two
parts; the set of tools for building optimizing
compilers quickly and the tools that produce and
analyze the measurements of the execution behavior of
the instruction set architecture. The compiler
technology is known as vpo [BENI88, DAVI84, DAVI86]. An
efficient way to collect measurements for subsequent
analysis is to modify the back end of the compiler to
store the characteristics of the instructions to be
executed and to produce code that will count the number
of times that each instruction is executed. These
modifications have been implemented in vpo and are
shown in Figure 1. The first modification necessary to
collect measurements is to have vpo save the
characteristics of the instructions that will be
executed. During code selection, information about the
characteristics of the instructions are gathered and
used for semantic checks. The semantic checks are
extended to store these characteristics with the
instruction by invoking a machine-independent routine.
After all optimizations have been completed, the
information about each instruction is then written to a
file for subsequent processing. The second modification
is to have vpo generate code to count the number of
times each instruction is executed. Again this is
accomplished after all optimizations have been
performed. Within each function there are groups of
instructions, basic blocks, that are always executed
the same number of times. There are also groups or
classes of basic blocks that are executed the same
number of times and these are denoted as execution
classes. Using the dataflow information collected by
the optimizer, the execution classes are determined and
code to count the number of times that each execution
class is executed is inserted at the beginning of the
first basic block in the execution class. At the end of
the execution of the program, the number of times that
each execution class is executed is written to a file.
The execution counts and the characteristics of the
instructions can then both be used to produce dynamic
measurements. The characteristics of the instructions
can also be used to produce static measurements. ease
has been ported to ten different machines to compare
current architectures. Measurements from the execution
of a test set of nineteen C programs were obtained for
each of the architectures. The detail and accuracy of
the reports produced by ease allowed insights to be
drawn when analyzing the measurements. The measurements
collected include: instruction path length, instruction
path size, instruction type distribution, addressing
mode distribution, memory reference size distribution,
memory reference address distribution, register usage,
condition code usage, conditional branches taken,
average number of instructions between branches, and
data type distribution. The
measurements are sufficiently detailed to determine the
number of times each combination of addressing mode and
data type is used for each field of each type of
instruction. Results comparing the ten architectures
analyzed appear in [WHAL89]. In addition to using ease
to evaluate and analyze existing instruction set
architectures, it can be used to help design new
machines [DAVI89b]. In this case, vpo emits code for an
existing host machine that emulates the instruction set
of the machine being designed. vpo's organization
permits this to be done quickly and easily as follows.
The last step in the compilation process is the
conversion of a machine-independent representation of
an instruction to assembly language for the target
machine and its emission to a file that will be
processed by the system's assembler. In order to
evaluate an architecture that does not exist, rather
than emit assembly code for the target machine,
assembly code for an existing architecture is emitted.
Information about the effects of the instruction are
emitted as if the target architecture existed. ease has
also been used to analyze different code generation
strategies. For instance, by recompiling the source
files from the C run-time library, different calling
sequence conventions have been investigated [DAVI89a].
By extracting measurements of the behavior of the code,
the effect of any change can be easily observed. This
environment for the collection of architectural
measurements has been designed to require little effort
when retargeting for a new architecture. Since the code
selector and other optimizations are constructed
automatically, a vpo-based compiler is easy to
retarget. Because the optimizer stores information
about instructions using a machine-independent
representation, it is easy to produce assembly code for
both existing and proposed architectures and to store
instruction information for the collection of
measurements. Most of the code to perform the
extraction of measurements is also machine-independent.
A vpo-based C compiler for ten different machines was
modified to collect measurements as specified above.
For each machine, it typically took three to four hours
to make the necessary machine-dependent modifications
to the compiler. The ease environment has been shown to
be an efficient tool for architectural evaluation and
design. Since accurate and detailed reports can be
produced for a variety of measurements, the impact of
each modification to the compiler or architecture can
easily be determined. This allows one to use an
iterative design method for evaluation of performance
in a quantitative manner.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Waclawsky:1990:DQB,
author = "John G. Waclawsky and Ashok K. Agrawala",
title = "Dynamic queue behavior in networks with window
protocols",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "261--262",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98777",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we employ a deterministic analysis
technique to characterize the dynamic queueing aspects
of window protocols. The deterministic behavior of
these protocols and the deterministic influence of the
resources along the physical path are explicitly
considered in the evaluation of path queue behavior.
Transient and steady state queue behavior of fixed and
sliding window protocols are investigated. We discover
the existence of significant nonlinearities in the
dynamics of queue activity. Window protocols are viewed
as logical simplex pipes. These pipes connect a sender
and a receiver through a series of heterogeneous
physical resources which provide a path of finite delay
between them. Links and nodes make up the path
resources which supply physical connectivity. The
resource with the largest delay is called the
bottleneck resource. Dynamic queue behavior is obtained
by explicitly considering the fact that feedback
mechanisms employed by window protocols make them
inherently cyclic. Thus a group of packets, called a
window, enters the network every cycle. The concept of
a window can be formalized in terms of containers which
are made available to carry packets through a path.
Packets cannot be transmitted without a container.
Controlling the number of containers available at the
protocol sender controls the amount of data flowing in
the path. Packet transmission by the sender, using the
first link, can occur when either the link changes its
status from busy to free or an acknowledgement is
received. The sender is considered ``greedy'' since
fundamental sender operation is to transmit as long as
both packets and containers are available.
Deterministic behavior occurs whenever the arrival rate
of packets to the sender is such that there is always a
packet available for transmission. This situation
occurs frequently in networks for all types of traffic.
In fact, the whole class of ``Batch'' traffic satisfies
this arrival situation because of the rapid generation
of packets by batch applications. The following
assumptions were employed for this analysis. The path
is initially empty. Packets are always available for
transmission by the sender. Thus data flow only stops
when the sender expires his container supply. All
packets (including those containing a request or
acknowledgement) are the same size. No cross traffic is
present. There is no loss or reordering of packets. All
resources follow a work conserving discipline. We
define that departures from one resource occur at the
same time instant as arrivals to the next resource.
Fundamental packet and resource activity shows that the
bottleneck exerts a major influence on path behavior.
This is seen for two reasons. First, when load is
heavy, packets depart from the path under control of
the bottleneck. Thus, the bottleneck controls path
throughput. Second, if a packet is delayed anywhere
along the path it also waits at the bottleneck. Thus,
the bottleneck controls the timing of window protocol
acknowledgements and all resource utilizations. The
queue formation process is seen as a by-product of the
heterogeneous delays that exist along a path. Whenever
a higher speed resource exists at the sender, then
queue sizes increase normally at slower resources along
a path during any period of continuous sender
transmission. Clearly, if path resource delays are
equal along a path or a slower resource exists
``upstream'', then no queue buildup can occur
``downstream'' from the slower or equal speed resource.
Thus, queue build up along a path only occurs at, or
prior to, the bottleneck location. Once the path is
full, whenever both the bottleneck and the protocol
sender are transmitting, then packet build up along the
path occurs at the same rate that containers are
consumed at the sender. Since the arrival rate of
packets to any queue is limited by the slowest upstream
resource in the path, we only examine paths with
increasing resource delays. Paths without these exact
characteristics do make up a substantial portion of
many actual network environments. Queues within these
paths can be analyzed by looking further upstream for
an appropriate arrival rate. This is done by shifting
packet arrival times through the use of a constant for
each queue. Results show that window protocol activity,
along with physical path delays and the value of the
window size, controls both the magnitude of queue sizes
and their rate of change. In addition the cyclic
behavior of the window protocol sender causes cyclic
queue activity all along the path. Queue activity is
found to have three distinct phases. The initial phase
describes queue build up behavior. This phase begins
with the arrival of the first bit of the initial packet
at any queue. Packets arrive at a rate controlled by
the previous upstream link. Queue build up continues
until packet arrivals from the previous upstream
resource temporarily stops. The second phase describes
a short pause until arrivals begin again. Thus, any
queue built up during the first phase begins draining.
The third phase consists of a queue finding a cyclic
pattern of packet arrivals from a previous resource.
Solutions for the occurrence of each phase can be
obtained through an iterative process. This process
involves solving for the same information in the
previous resource queues back to the base case of the
window protocol sender. Additional results show the
behavior of window protocols often forces large queues
to appear near a window protocol sender during initial
protocol activity. At each queue, the maximum queue
size occurs at or right after queue depletion of the
previous upstream resource. Thus queues always drain
and appear further ``downstream'' as data transfer
continues. We refer to this activity as queue
migration. The speed at which a particular queue drains
is called the Queue Drain Rate. This rate is shown to
be a function of the speed of the resource the queue is
feeding and of the bottleneck speed. Queues can be
considered migrating at the Queue Drain Rates of the
various resources. Queue migration continues until the
bottleneck is reached. At this point in time, if the
window size is large enough, a large queue can be (and
often is) permanently maintained at the bottleneck.
This behavior agrees with similar behavior described by
finite population closed queueing systems. These
systems observe that at steady state you are most
likely to find a queue in front of the bottleneck
resource. Steady state begins once sender transmission
becomes cyclic at the bottleneck rate. The queue
migration process begins at this same time. One
intriguing result is that once the sender enters steady
state, the total queue time along the path for the
request packets is an invariant. This is true even
while queue migration is still occurring. It is
interesting to note that despite the widespread use
of window protocols no deterministic analysis of their
queueing behavior seems to exist. Yet, the approach
taken in this research appears very promising. Because
deterministic dependencies are most evident when a load
exists, this deterministic analysis technique also
allows the accurate determination of queueing activity
during significant network load, a time network
designers consider most critical. The results are
applicable to the window protocol mechanisms for
congestion and flow control in SNA, and TCP.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garofalakis:1990:PMI,
author = "John D. Garofalakis and Paul G. Spirakis",
title = "The performance of multistage interconnection networks
with finite buffers",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "263--264",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98457.98779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multistage interconnection networks with crossbar
switches are a major component of parallel machines. In
this paper we analyze Banyan networks of $k$ by $k$
switches and with finite buffers. The exact solution of
the steady state distribution of the first stage is
derived in the situation where packets are lost when
they encounter a full buffer (Assumption A). The
solution is a linear combination of $ k - 1$
geometrics. We use this to get an approximation for the
steady state distributions in the second stage and
beyond. As a side effect, the infinite buffer case is
solved, confirming known results. Our results are
validated by extensive simulations. An alternate
situation of networks where full buffers may block
previous switches is also analyzed through an
approximation technique (Assumption B).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vasilakos:1990:AWF,
author = "Athanasios V. Vasilakos and Christos A. Moschonas and
Constantinos T. Paximadis",
title = "Adaptive window flow control and learning algorithms
for adaptive routing in data networks",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "265--266",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98780",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a new adaptive flow control algorithm
together with learning routing algorithms. The key
performance measure in both algorithms is packet delay.
Window adjustment and route selection are based on
delay measurements. Simulation results have shown the
superiority of the new scheme over existing
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nussbaum:1990:MCS,
author = "Daniel Nussbaum and Ingmar Vuong-Adlerberg and Anant
Agarwal",
title = "Modeling a circuit switched multiprocessor
interconnect",
journal = j-SIGMETRICS,
volume = "18",
number = "1",
pages = "267--269",
month = may,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/98460.98781",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:09:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaither:1990:ER,
author = "Blaine D. Gaither",
title = "{Editor}'s readings",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "25--26",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.1045579",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vance:1990:ARM,
author = "R. E. Vance",
title = "Article review: {`A message-based approach to discrete
event simulation' by R. L. Bagrodia, K. M. Chandy, and
J. Misra. IEEE Trans. Softw. Eng. SE-13, 6 (June
1987)}",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "27--27",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.1045580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As a service to our readers, PER has reached an
agreement to reprint reviews of books and papers on
simulation and modeling that originally appeared in ACM
{\em Computing Reviews}. CR is a monthly journal that
publishes critical reviews on a broad range of
computing subjects including simulation and modeling.
As an ACM member, you can subscribe to CR by writing to
ACM Headquarters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allen:1990:AMS,
author = "Arnold O. Allen and Gary Hynes",
title = "Approximate {MVA} solutions with fixed throughput
classes",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "31--40",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.101321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Transaction (open) type workloads are often used in
approximating computer system workloads which are
actually closed because open workloads provide
reasonable estimates in many cases and their solutions
are straight-forward. We have found that their use can
distort the results for many workloads in a multiclass
queueing network model of a computer system. We have
replaced transaction workloads with what we call {\em
fixed class\/} workloads. We present an approximate
algorithm based on MVA that represents a class with a
given throughput by a corresponding terminal or batch
class, which we call a fixed class workload. We solve
for the closed population required to deliver the
requested throughput. We also present techniques for
overcoming problems encountered in the solution of some
fixed class models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McGehearty:1990:COPb,
author = "Patrick McGehearty",
title = "Challenges in obtaining peak parallel performance with
a {Convex C240}, a parallel vector processor",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "41--47",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.101322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The behavior of the Linpack $ 300 \times 300 $
benchmark is examined in the context of a parallel
vector machine architecture. Detailed evaluation is
performed with respect to the Convex C240. Issues
relating to algorithm design and system characteristics
are discussed in the context of the Linpack
implementation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaither:1990:SVP,
author = "Blaine Gaither",
title = "Scientific visualization of performance data:
evaluation of {DV-Draw}",
journal = j-SIGMETRICS,
volume = "18",
number = "2",
pages = "48--53",
month = aug,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/101320.101323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:39 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This report discusses the attributes of the {\em
DV-Draw\/} package from the VI Corporation of Amherst,
Massachusetts. {\em DV-Draw\/} is a scientific
visualization package which is part of a larger package
called DataViews. The requirements for visualization
software in performance evaluation are identified. The
results of applying {\em DV-Draw\/} to animate the
output of an architectural model were successful.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Taheri:1990:ANN,
author = "H. Reza Taheri",
title = "An analysis of the {Neal Nelson Business Benchmark}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "13--18",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.122236",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Neal Nelson Business Benchmark is an
industry-standard benchmark which is used to evaluate
the performance of UNIX computer systems. The Business
Benchmark purports to give the user an idea of the
performance of the machine under real business UNIX
workloads. In this article we will show that the
Business Benchmark stresses few components of the
system with very simple tests. As such it is more
suited as a component level benchmark for users who want
to focus on the performance of a particular aspect of
the system, rather than a system-level UNIX benchmark
representative of commercial applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keller:1990:SBC,
author = "Tom Keller",
title = "{SPEC} benchmarks and competitive results",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "19--20",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.122237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In less than a year since its introduction the System
Performance Evaluation Cooperative (SPEC (TM))
benchmarks have established themselves as an important
and widely distributed benchmark suite for engineering
and scientific workstations, displacing the old
standards Dhrystone, Linpack and Whetstone. This is
because most workstation vendors support SPEC and have
participated in developing both the benchmarks and a
benchmarking methodology that overcome many of the
failings of the old benchmark standards. SPEC's strong
endorsement by EE TIMES newsmagazine helps insure that
SPEC results are heavily publicized in the industry.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRCa,
author = "David Finkel",
title = "Book review: {`Computer Systems Performance Management
and Capacity Planning' by J. Cady and B. Howarth
(Prentice-Hall, 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "21--21",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I reviewed this book from the author's manuscript. The
book is now being typeset, and the author tells me that
it is due to appear in February, 1991.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRA,
author = "David Finkel",
title = "Book review: {`The Art of Computer Systems Performance
Analysis' by R. Jain (Wiley-Interscience, 1991)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "21--22",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book grew out of the author's experience teaching
a course in performance evaluation to final year
undergraduates. This heritage of undergraduate origins
shows up throughout the book. The mathematics is
presented very gently. For example, several complex
formulas are written out twice, once without summation
notation (i.e., $ a_1 + a_2 + \cdots + a_n $) and
then again with summation notation ($ \sum_i a_i $).
There are numerous worked out examples, and a wide
range of exercises, from simple ones that just use the
formulas in the text to more challenging exercises.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRCb,
author = "David Finkel",
title = "Book review: {`Computer and Communication Systems
Performance Modelling' by Peter J. B. King (Prentice
Hall 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "22--22",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The preface to this book sets out the author's thesis,
that `computer science students tend to view
performance analysis as a practical discipline \ldots{}
[and] often prefer to rely on physical insight and
intuition rather than formal insights.' Accordingly,
the author's approach is to emphasize useful methods
and applications, rather than formal mathematical
derivations. The background expected of students is
basic operating systems, machine architecture, data
structures, and elementary calculus and basic
probability theory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRQ,
author = "David Finkel",
title = "Book review: {`Quantitative Analysis of Computer
Systems' by C. H. C. Leung (Wiley, 1988)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "22--23",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is, according to the author, `designed for
final year undergraduates in computer sciences, or
conversion course MSc students.' It presumes some
background in elementary probability theory, although
this material is reviewed early in the book.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1990:BRF,
author = "David Finkel",
title = "Book review: {`Fundamentals of Performance Modeling'
by M. K. Molloy (Macmillan, 1989)}",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "23--23",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is intended, according to the author's
preface, for the undergraduate computer science student
without an extensive mathematical background. The book
itself provides the mathematical background, through a
chapter on probability theory, a chapter on transform
theory, and an appendix on mathematical formulas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Saavedra-Barrera:1990:MCB,
author = "Rafael Saavedra-Barrera and Alan J. Smith and Eugene
Miya",
title = "Machine Characterization Based on an Abstract
High-level Language Machine",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "24--24",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A {\em linear performance model\/} decomposes program
execution into $n$ distinct operations, such as adds,
multiplies, loads, and stores. A program is
characterized as an $n$-vector of operation counts, and
a machine is characterized as an $n$-vector of
operation times. The dot-product of the two is the time
required for the machine to execute the program. A
linear performance model has several uses: 1. Once every
machine and program is characterized, the performance
of each program on each machine can be predicted
without having to run them. 2. Two machines (or
programs) can be compared by comparing corresponding
elements of their parameter vectors. The influence of
individual parameters on overall performance can be
used to predict the effect of design changes. 3.
Machines (and programs) can be classified by the
similarity of their parameter vectors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Panwar:1990:OSP,
author = "Shivendra S. Panwar and Don Towsley and Jack K. Wolf",
title = "Optimal scheduling policies for a class of queues with
customer deadlines to the beginning of service",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "25--25",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper treats the problem of queueing packets
which have an assigned expiration date. If a packet
does not begin processing within the specified time
limit, it is discarded as useless. The primary example
is transmission of voice or video frames over a
packet-switched network, where the illusion of realtime
transmission is to be maintained. The occasional loss
of a packet will reduce transmission quality, but the
voice or video reception should remain intelligible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tokuda:1990:RTM,
author = "Hideyuki Tokuda and Makoto Kotera and Clifford E.
Mercer",
title = "A Real-Time Monitor for a Distributed Real-Time
Operating System",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "26--26",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed real-time systems are difficult to
develop. External events occur independently of
internal control, and the real-time system must be
designed to accommodate them correctly. Two problems
emerging from this are the {\em logical\/} correctness
and the {\em timing\/} correctness of the system
software: not only must it process the real-time events
correctly, but the program timing must prevent the task
of processing from interfering with the task of
monitoring.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thiebaut:1990:FDC,
author = "Dominique Thiebaut",
title = "On the Fractal Dimension of Computer Programs and its
Application to the Prediction of the Cache Miss Ratio",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "41--41",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.1045578",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fractals are complex, nonsmooth functions with simple
recursive characterizations. Many complex phenomena
resemble fractals, and may therefore be analyzable.
Intuitively the repetition structures of a computer
program should produce patterns of fractal behavior.
This paper shows fractal characteristics of cache-miss
and memory-reference patterns across four program
traces. It should be interesting to those wanting a
simple classification of program behavior; cache
designers should use more exact methods, such as
trace-driven simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ponder:1990:PVA,
author = "Carl Ponder",
title = "Performance variation across benchmark suites",
journal = j-SIGMETRICS,
volume = "18",
number = "3",
pages = "42--48",
month = nov,
year = "1990",
CODEN = "????",
DOI = "https://doi.org/10.1145/122235.122238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:10:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance ratio between two systems tends to
vary across different benchmarks. Here we study this
variation as a `signature' or `fingerprint' of the
systems under consideration. This `fingerprint' can be
used to guess the performance of programs not
represented in a benchmark suite, assess the breadth
and credibility of the benchmark suite, and infer
details of the system design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRMa,
author = "David Finkel",
title = "Book review: {`Multiprocessor Performance' by Erol
Gelenbe (John Wiley \& Sons, 1989)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "9--9",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book begins with a survey of the different
approaches to parallelizing computation: vector or
array processors, loosely-coupled distributed systems,
and multiprocessor systems. The author then states his
principal thesis, that only multiprocessor systems
offer the potential for unlimited processing power in
the machines of the future. Since the impetus for
designing multiprocessor systems is to improve
performance, it is obviously crucial to evaluate the
performance of these systems. This is the task set out
for the rest of the book.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPb,
author = "David Finkel",
title = "Book review: {`Performance Analysis of Transaction
Processing Systems' by Wilbur H. Highleyman (Prentice
Hall, 1989)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "10--10",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045551",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As its title indicates, this book has the very
specific purpose of applying performance evaluation
tools to the study of on-line transaction processing
systems. The book provides both an overview of the
relevant mathematical methods from performance
evaluation, and an application of those methods to
transaction processing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPa,
author = "David Finkel",
title = "Book review: {`Performance Measurement of Computer
Systems' by Phillip McKerrow (Addison-Wesley 1988)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "10--11",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045552",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement of computer system performance usually
occupies at most a chapter or two of performance
evaluation texts, but here it is given the book-length
treatment it deserves. The author begins the book with
an introductory chapter discussing the purposes and
goals of performance measurement, which of course
varies from one study to another. He then surveys the
kinds of measurement tools available, and sets out his
philosophy of measurement methodology (which includes
references to Aristotle and the Renaissance world
view), which is expanded in a later chapter.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRMb,
author = "David Finkel",
title = "Book review: {`Multiple Access Protocols: Performance
and Analysis' by Raphael Rom and Moshe Sidi
(Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "11--11",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book concentrates on the important topic of the
performance of computer communications networks,
particularly on the performance of the multiple access
protocols they use. The approach is mathematical, and
the authors clearly state the mathematical
prerequisites they expect from the reader: probability
theory, stochastic processes in general, and Markov
chains and the M/G/1 queue in particular. The
mathematical prerequisites allow the authors to do a
careful and complete job of deriving the results they
need. Each chapter ends with a set of challenging
exercises, for those who wish to use the book as a
text, and the book ends with an extensive
bibliography.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPc,
author = "David Finkel",
title = "Book review: {`Performance Engineering of Software
Systems' by Connie U. Smith (Addison-Wesley, 1990)}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "11--12",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.1045554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The author's central thesis is that responsiveness, or
performance, is crucial to the success of software
systems. Thus performance considerations must be a part
of all stages of software development, starting from
the earliest stages of design. The approach uses a
combination of straightforward data collection and
analysis, and mathematically sophisticated techniques.
The mathematical treatment is entirely self-contained,
and no extensive mathematical background is assumed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Johari:1991:POH,
author = "Shyam Johari",
title = "Performance objectives --- how to define them",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "18--19",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.122290",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The successful development of a product demands that
product performance objectives be clearly defined and
agreed to as early as possible during the product
development cycle, typically during the product
requirements phase. Unless clearly defined and
uniformly understood, performance objectives can be
subject to varied interpretation as product nears
completion. Why? Because all parties (e.g., Product
Marketing, Product Management, and Product Development)
involved have their own performance perspective. How to
clearly define the product performance objectives would
be the thrust of this note.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ponder:1991:BS,
author = "Carl G. Ponder",
title = "Benchmark semantics",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "20--24",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.122291",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Be careful when interpreting benchmark measurements
that compare two languages or two implementations of
the same language. A program expressed in two different
languages rarely computes the exact same function in
both cases. The same goes for a program run on two
different implementations of the same language. The
implementation details ultimately affect the language
semantics as well as the benchmark performance. Here
are some simple examples of this effect.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cabrera:1991:TSS,
author = "Luis-Felipe Cabrera",
title = "Technical summary of the {Second IEEE Workshop on
Workstation Operating Systems}",
journal = j-SIGMETRICS,
volume = "18",
number = "4",
pages = "25--32",
month = apr,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122289.122292",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:05 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The broad spectrum of universities, industrial
research laboratories, and computer companies
represented at the Second IEEE Workshop on Workstation
Operating Systems provided a rich snapshot of current
activities in operating systems. There were
representatives of 19 operating system research
projects among the participants and several from
commercial offerings. The attendees came from seven
countries on three continents: North America, Europe,
and South America. Since the last Workshop in 1987,
there have been more advances in hardware than in
software functions. Software standards continue to
emerge in the areas of operating system interfaces,
page description languages, window management
interfaces, and communication protocols. New software
applications exist in the areas of multimedia and
multi-node computing. Object-oriented technology is
already present in running systems and gaining
importance. The areas that the participants perceived
needing most future work were operating system
abstractions, workstation operation, system
responsiveness, input output, network services,
management of clusters of workstations, and failure
handling. While processor speeds, main memory access
speeds, memory density, and secondary storage capacity
continue to increase fast, disk seek times have
decreased only slightly, and the bandwidth of most
local-area networks has not increased at all. FDDI
networks are just beginning to be deployed. The
software is adjusting to this hardware scenario by
using caching at multiple levels of the systems. In the
last two years large main memories at individual
computing nodes and multi-node computer installations
have become common. It is expected that most future
computing nodes will have substantial local storage and
that high-bandwidth networks will enable the support of
continuous media like voice and video. Input output, to
disks, to networks, and to user-oriented devices, is
expected to become the central problem in future
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Melliar-Smith:1991:PAB,
author = "P. M. Melliar-Smith and Louise E. Moser",
title = "Performance analysis of a broadcast communications
protocol",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "1--10",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107973",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Trans protocol is a communications protocol that
exploits the broadcast capability of local area
networks. Classical Markov models and queueing theory
are used to analyze the performance of components of
this protocol, but cannot be applied directly to
determine the performance of the protocol as a whole.
Instead, Laplace transforms of the distributions for
the components are first derived and then combined into
a transform for the entire protocol. This transform is
evaluated by contour integration to yield the latency
for the protocol.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Danzig:1991:AMO,
author = "Peter B. Danzig",
title = "An analytical model of operating system protocol
processing including effects of multiprogramming",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "11--20",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model the limited buffer queueing process that
occurs within the UNIX operating system's protocol
processing layers. Our model accounts for the effects
of user process multiprogramming and preemptive,
priority scheduling of interrupt, operating system, and
user tasks. After developing the model, we use it to
predict message loss that occurs during local area
network (LAN) multicast. Our service time model can be
applied to window- and rate-based stream flow control.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harinarayan:1991:LSL,
author = "Venkatesh Harinarayan and Leonard Kleinrock",
title = "Load sharing in limited access distributed systems",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "21--30",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107975",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we examine dynamic load sharing in
limited access distributed systems. In this class of
distributed systems all servers are not accessible to
all sources, and there exist many different
accessibility topologies. We focus our attention on the
ring topology and provide an analytic model to derive
the approximate mean waiting time (our metric of
performance). We then consider other limited access
topologies and find that rather different
interconnection patterns give similar performance
measurements. We conjecture that the number of servers
accessible to a source is the parameter with the
greatest performance impact, in a limited access
topology with load sharing. We also introduce another
variable called diversity that is indicative of the
degree of load sharing and speculate that performance
is reasonably insensitive to diversity so long as it is
non-zero. Using these conjectures we show how a
reasonable estimate of the mean waiting time can be
analytically derived in many limited access
topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:1991:SPA,
author = "Tein-Hsiang Lin and Wernhuar Tarng",
title = "Scheduling periodic and aperiodic tasks in hard
real-time computing systems",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "31--38",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107976",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling periodic and aperiodic tasks to meet their
time constraints has been an important issue in the
design of real-time computing systems. Usually, the
task scheduling algorithms in such systems must satisfy
the deadlines of periodic tasks and provide fast
response times for aperiodic tasks. A simple and
efficient approach to scheduling real-time tasks is the
use of a periodic server in a static preemptive
scheduling algorithm. Periodic tasks, including the
server, are scheduled {\em a priori\/} to meet their
deadlines according to the knowledge of their periods
and computation times. The scheduling of aperiodic
tasks is then managed by the periodic server during its
service time. In this paper, a new scheduling algorithm
is proposed. The new algorithm creates a periodic
server which will have the highest priority but not
necessarily the shortest period. The server is
suspended to reduce the overhead if there are no
aperiodic tasks waiting, and is activated immediately
upon the arrival of the next aperiodic task. After
activated, the server performs its duty periodically
until all waiting aperiodic tasks are completed. For a
set of tasks scheduled by this algorithm, the deadlines
of periodic tasks are guaranteed by a deterministic
feasibility check, and the mean response time of
aperiodic tasks are estimated using a queueing model.
Based on the analytical results, we can determine the
period and service time of the server producing the
minimum mean response time for aperiodic tasks. The
analytical results are compared with simulation results
to demonstrate the correctness of our model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berry:1991:ADC,
author = "Robert Berry and Joseph Hellerstein",
title = "An approach to detecting changes in the factors
affecting the performance of computer systems",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "39--49",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107977",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Resolving intermittent performance problems in
computer systems is made easier by pinpointing when a
change occurs in the system's performance-determining
factors (e.g., workload composition, configuration).
Since we often lack direct measurements of performance
factors, this paper presents a procedure for indirectly
detecting such changes by analyzing performance
characteristics (e.g., response times, queue lengths).
Our procedure employs a widely used clustering
algorithm to identify candidate change points (the
times at which performance factors change), and a newly
developed statistical test (based on an AR(1) time
series model) to determine the significance of
candidate change points. We evaluate our procedure by
using simulations of M/M/1, FCFS queueing systems and
by applying our procedure to measurements of a
mainframe computer system at a large telephone company.
These evaluations suggest that our procedure is
effective in practice, especially for larger sample
sizes and smaller utilizations. We further conclude
that indirectly detecting changes in performance
factors appears to be inherently difficult in that the
sensitivity of a detection procedure depends on the
magnitude of the change in performance characteristics,
which often has a nonlinear relationship with the
change in performance factors. Thus, a change in
performance factors (e.g., increased service times) may
be more readily detected in some situations (e.g., very
low or very high utilizations) than in others (e.g.,
moderate utilizations). A key insight here is that the
sensitivity of the detection procedure can be improved
by choosing appropriate measures of performance
characteristics. For example, our experience and
analysis suggest that queue lengths can be more
sensitive than response times to changes in arrival
rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bodnarchuk:1991:SWM,
author = "Robert Bodnarchuk and Richard Bunt",
title = "A synthetic workload model for a distributed system
file server",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "50--59",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107978",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The accuracy of the results of any performance study
depends largely on the quality of the workload model
driving it. Not surprisingly then, workload modelling
is an area of great interest to those involved in the
study of computer system performance. While a
significant amount of research has focussed on the
modelling of workloads in a centralized computer
system, little has been done in the context of
distributed systems. The goal of this research was to
model the workload of a distributed system file server
in a UNIX/NFS environment. The resulting model is
distribution-driven and generates workload components
in real time. It runs externally to the system it
drives, thus eliminating any interference at the
server. The model was validated for different workload
intensities to ensure that it provides the flexibility
to vary the workload intensity without loss of
accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merchant:1991:MCA,
author = "Arif Merchant",
title = "A {Markov} chain approximation for the analysis of
{Banyan} networks",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "60--67",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper analyzes the delay suffered by messages in
a clocked, packet-switched, square Banyan network with
$ k \times k $ output-buffered switches by
approximating the flow processes in the network with
Markov chains. We recursively approximate the departure
process of buffers of the $ n^{\rm th} $ stage in terms
of that at the $ (n - 1)^{\rm st} $ stage. We show how to
construct the transition matrix for the Markov chain at
each stage of the network and how to solve for the
stationary distribution of the delay in the queues of
that stage. The analytical results are compared with
simulation results for several cases. Finally, we give
a method based on this approximation and the technique
of {\em coupling\/} to compute upper bounds on the time
for the system to approach steady state.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:1991:PAF,
author = "T. Lin and L. Kleinrock",
title = "Performance analysis of finite-buffered multistage
interconnection networks with a general traffic
pattern",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "68--78",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107980",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an analytical model for evaluating the
performance of finite-buffered packet switching
multistage interconnection networks using blocking
switches under any general traffic pattern. Most of the
previous research work has assumed unbuffered, single
buffer or infinite buffer cases, and all of them
assumed that every processing element had the same
traffic pattern (either a uniform traffic pattern or a
specific hot spot pattern). However, their models
cannot be applied very generally. There is a need for
an analytical model to evaluate the performance under
more general conditions. We first present a description
of a decomposition {\&} iteration model which we
propose for a specific hot spot pattern. This model is
then extended to handle more general traffic patterns
using a transformation method. For an even more general
traffic condition where each processing element can
have its own traffic pattern, we propose a
superposition method to be used with the iteration
model and the transformation method. We can extend the
model to account for processing elements having
different input rates by adding weighting factors in
the analytical model. An approximation method is also
proposed to refine the analytical model to account for
the memory characteristic of a blocking switch which
causes persistent blocking of packets contending for
the same output ports. The analytical model is used to
evaluate the uniform traffic pattern and a very general
traffic pattern `EFOS'. Comparison with simulation
indicates that the analytical model is very accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wood:1991:MET,
author = "David A. Wood and Mark D. Hill and R. E. Kessler",
title = "A model for estimating trace-sample miss ratios",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "79--89",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107981",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unknown references, also known as cold-start misses,
arise during trace-driven simulation of uniprocessor
caches because of the unknown initial conditions.
Accurately estimating the miss ratio of unknown
references, denoted by $\mu$, is particularly important
when simulating large caches with short trace samples,
since many references may be unknown. In this paper we
make three contributions regarding $\mu$. First, we
provide empirical evidence that $\mu$ is much larger than
the overall miss ratio (e.g., 0.40 vs. 0.02). Prior
work suggests that they should be the same. Second, we
develop a model that explains our empirical results for
long trace samples. In our model, each block frame is
either {\em live}, if its next reference will hit, or
dead, if its next reference will miss. We model each
block frame as an alternating renewal process, and use
the renewal-reward theorem to show that $\mu$ is simply
the fraction of time block frames are dead. Finally, we
extend the model to handle short trace samples and use
it to develop several estimators of $\mu$. Trace-driven
simulation results show these estimators lead to better
estimates of overall miss ratios than do previous
methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiang:1991:EMV,
author = "Mee-Chow Chiang and Gurindar S. Sohi",
title = "Experience with mean value analysis model for
evaluating shared bus, throughput-oriented
multiprocessors",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "90--100",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We report on our experience with the accuracy of mean
value analysis analytical models for evaluating shared
bus multiprocessors operating in a throughput-oriented
environment. Having developed separate models for
multiprocessors with circuit switched and split
transaction, pipelined (packet switched) buses, we
compare the results of the models with those of an
actual trace-driven simulation for 5,376 multiprocessor
configurations. We find that the analytical models are
accurate in predicting the individual processor
throughputs and partial bus utilizations. For processor
throughput, the difference between the results of the
models and simulation are within 1\% for 75\% of the
cases and within 3\% in 94\% of all cases. For partial
bus utilization the model results are within 1\% of
simulation results in 70\% of all cases and within 3\%
in 92\% of all cases. The models are less accurate in
predicting cache miss latency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1991:PAT,
author = "Anurag Gupta and Ian Akyildiz and Richard M.
Fujimoto",
title = "Performance analysis of {Time Warp} with homogeneous
processors and exponential task times",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "101--110",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107983",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The behavior of $n$ interacting processors
synchronized by the `Time Warp' protocol is analyzed
using a discrete state continuous time Markov chain
model. The performance and dynamics of the processes
are analyzed under the following assumptions:
exponential task times and times-tamp increments on
messages, each event message generates one new message
that is sent to a randomly selected process, negligible
rollback, state saving, and communication delay,
unbounded message buffers, and homogeneous processors
that are never idle. We determine the fraction of
processed events that commit, speedup, rollback
probability, expected length of rollback, the
probability mass function for the number of uncommitted
processed events, and the probability distribution
function for the virtual time of a process. The
analysis is approximate, so the results have been
validated through performance measurements of a Time
Warp testbed (PHOLD workload model) executing on a
shared memory multiprocessor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:1991:SDH,
author = "Jong Kim and Chita R. Das",
title = "On subcube dependability in a hypercube",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "111--119",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107984",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present an analytical model for
computing the dependability of hypercube systems. The
model, referred to as task-based dependability (TBD),
is developed under the assumption that a task needs at
least an $m$-cube ($m$) ????",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1991:IOS,
author = "Anoop Gupta and Andrew Tucker and Shigeru Urushibara",
title = "The impact of operating system scheduling policies and
synchronization methods on performance of parallel
applications",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "120--132",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107985",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Shared-memory multiprocessors are frequently used as
compute servers with multiple parallel applications
executing at the same time. In such environments, the
efficiency of a parallel application can be
significantly affected by the operating system
scheduling policy. In this paper, we use detailed
simulation studies to evaluate the performance of
several different scheduling strategies. These include
regular priority scheduling, coscheduling or gang
scheduling, process control with processor
partitioning, handoff scheduling, and affinity-based
scheduling. We also explore tradeoffs between the use
of busy-waiting and blocking synchronization primitives
and their interactions with the scheduling strategies.
Since effective use of caches is essential to achieving
high performance, a key focus is on the impact of the
scheduling strategies on the caching behavior of the
applications. Our results show that in situations where
the number of processes exceeds the number of
processors, regular priority-based scheduling in
conjunction with busy-waiting synchronization
primitives results in extremely poor processor
utilization. In such situations, use of blocking
synchronization primitives can significantly improve
performance. Process control and gang scheduling
strategies are shown to offer the highest performance,
and their performance is relatively independent of the
synchronization method used. However, for applications
that have sizable working sets that fit into the cache,
process control performs better than gang scheduling.
For the applications considered, the performance gains
due to handoff scheduling and processor affinity are
shown to be small.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:1991:PPB,
author = "Songnian Zhou and Timothy Brecht",
title = "Processor-pool-based scheduling for large-scale {NUMA}
multiprocessors",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "133--142",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107986",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large-scale Non-Uniform Memory Access (NUMA)
multiprocessors are gaining increased attention due to
their potential for achieving high performance through
the replication of relatively simple components.
Because of the complexity of such systems, scheduling
algorithms for parallel applications are crucial in
realizing the performance potential of these systems.
In particular, scheduling methods must consider the
scale of the system, with the increased likelihood of
creating bottlenecks, along with the NUMA
characteristics of the system, and the benefits to be
gained by placing threads close to their code and data.
We propose a class of scheduling algorithms based on
{\em processor pools}. A processor pool is a software
construct for organizing and managing a large number of
processors by dividing them into groups called pools.
The parallel threads of a job are run in a single
processor pool, unless there are performance advantages
for a job to span multiple pools. Several jobs may
share one pool. Our simulation experiments show that
processor pool-based scheduling may effectively reduce
the average job response time. The performance
improvements attained by using processor pools increase
with the average parallelism of the jobs, the load
level of the system, the differentials in memory access
costs, and the likelihood of having system bottlenecks.
As the system size increases, while maintaining the
workload composition and intensity, we observed that
processor pools can be used to provide significant
performance improvements. We therefore conclude that
processor pool-based scheduling may be an effective and
efficient technique for scalable systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1991:ATM,
author = "Mark S. Squillante and Randolph D. Nelson",
title = "Analysis of task migration in shared-memory
multiprocessor scheduling",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "143--155",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107987",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In shared-memory multiprocessor systems it may be more
efficient to schedule a task on one processor than on
another. Due to the inevitability of idle processors in
these environments, there exists an important tradeoff
between keeping the workload balanced and scheduling
tasks where they run most efficiently. The purpose of
an adaptive task migration policy is to determine the
appropriate balance between the extremes of this load
sharing tradeoff. We make the observation that there
are considerable differences between this load sharing
problem in distributed and shared-memory multiprocessor
systems, and we formulate a queueing theoretic model of
task migration to study the problem. A detailed
mathematical analysis of the model is developed, which
includes the effects of increased contention for system
resources induced by the task migration policy. Our
objective is to provide a better understanding of task
migration in shared-memory multiprocessor environments.
In particular, we illustrate the potential for
significant improvements in system performance, and we
show that even when migration costs are large it may
still be beneficial to migrate waiting tasks to idle
processors. We further demonstrate the potential for
unstable behavior under migratory scheduling policies,
and we provide optimal policy thresholds that yield the
best performance and avoid this form of processor
thrashing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dan:1991:AMH,
author = "Asit Dan and Daniel M. Dias and Philip S. Yu",
title = "Analytical modelling of a hierarchical buffer for a
data sharing environment",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "156--167",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107988",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a data sharing environment, where a number of
loosely coupled computing nodes share a common storage
subsystem, the effectiveness of a private buffer at
each node is limited due to the multi-system
invalidation effect, particularly under a non-uniform
data access pattern. A global shared buffer can be
introduced to alleviate this problem either as a disk
cache or shared memory. In this paper we developed an
approximate analytic model to evaluate different shared
buffer management policies (SBMPs) which differ in
their choice of data granules to be put into the shared
buffer. The analytic model can be used to study the
trade-offs of different SBMPs and the impact of
different buffer allocations between shared and private
buffers. The effects of various parameters, such as,
the probability of update, the number of nodes, the
sizes of private and shared buffer, etc., on the
performance of SBMPs are captured in the analytic
model. A detailed simulation model is also developed to
validate the analytic model. We show that dependency
between the contents of the private and shared buffers
can play an important role in determining the
effectiveness of the shared buffer particularly for a
small number of nodes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reiman:1991:PAC,
author = "Martin Reiman and Paul E. Wright",
title = "Performance analysis of concurrent-read
exclusive-write",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "168--177",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107989",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the concurrent-read exclusive-write
protocol for access to a shared resource, such as
occurs in database and distributed operating systems.
Readers arrive according to a Poisson process and
acquire shareable, i.e., non-exclusive, locks which,
once granted, are released after a generally
distributed random period. Writers arrive according to
an arbitrary renewal process and acquire exclusive
locks which, once granted, are held for a random time
which is also generally distributed. Locks are granted
in the order in which requests are received. We derive
necessary and sufficient conditions under which the
queue is stable, i.e., the latencies for reader/writer
lock acquisition have a limiting distribution. In the
unstable case, the delays of successive readers/writers
become unbounded. The stability condition is sensitive
to the interarrival-time distribution of the writers
and the lock holding-time distribution of the readers
but depends only on the mean lock holding-time of the
writers. Distributional and moment bounds are given for
the latencies of read/write requests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{French:1991:PMP,
author = "James C. French and Terrence W. Pratt and Mriganka
Das",
title = "Performance measurement of a parallel input\slash
output system for the {Intel iPSC\slash 2 Hypercube}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "178--187",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107990",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Intel Concurrent File System (CFS) for the iPSC/2
hypercube is one of the first production file systems
to utilize the declustering of large files across
numbers of disks to improve I/O performance. The CFS
also makes use of dedicated I/O nodes, operating
asynchronously, which provide file caching and
prefetching. Processing of I/O requests is distributed
between the compute node that initiates the request and
the I/O nodes that service the request. The effects of
the various design decisions in the Intel CFS are
difficult to determine without measurements of an
actual system. We present performance measurements of
the CFS for a hypercube with 32 compute nodes and four
I/O nodes (four disks). Measurement of read/write rates
for one compute node to one I/O node, one compute node
to multiple I/O nodes, and multiple compute nodes to
multiple I/O nodes form the basis for the study.
Additional measurements show the effects of different
buffer sizes, caching, prefetching, and file
preallocation on system performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chervenak:1991:PDA,
author = "Ann L. Chervenak and Randy H. Katz",
title = "Performance of a disk array prototype",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "188--197",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107991",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The RAID group at U.C. Berkeley recently built a
prototype disk array. This paper examines the
performance limits of each component of the array using
SCSI bus traces, Sprite operating system traces and
user programs. The array performs successfully for a
workload of small, random I/O operations, achieving 275
I/Os per second on 14 disks before the Sun4/280 host
becomes CPU-limited. The prototype is less successful
in delivering high throughput for large, sequential
operations. Memory system contention on the Sun4/280
host limits throughput to 2.3 MBytes/sec under the
Sprite Operating System. Throughput is also limited by
the bandwidth supported by the VME backplane, disk
controller and disks, and overheads associated with the
SCSI protocol. We conclude that merely using a powerful
host CPU and many disks will not provide the full
bandwidth possible from disk arrays. Host memory
bandwidth and throughput of disk controllers are
equally important. In addition, operating systems
should avoid unnecessary copy and cache flush
operations that can saturate the host memory system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1991:PMD,
author = "Shenze Chen and Don Towsley",
title = "Performance of a mirrored disk in a real-time
transaction system",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "198--207",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107992",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Disk mirroring has found widespread use in computer
systems as a method for providing fault tolerance. In
addition to increasing reliability, a mirrored disk can
also reduce I/O response time by supporting the
execution of parallel I/O requests. The improvement in
I/O efficiency is extremely important in a real-time
system, where each computational entity carries a
deadline. In this paper, we present two classes of
real-time disk scheduling policies, RT-DMQ and RT-CMQ,
for a mirrored disk I/O subsystem and examine their
performance in an integrated real-time transaction
system. The real-time transaction system model is
validated on a real-time database testbed, called
RT-CARAT. The performance results show that a mirrored
disk I/O subsystem can decrease the fraction of
transactions that miss their deadlines over a single
disk system by 68\%. Our results also reveal the
importance of real-time scheduling policies, which can
lead up to a 17\% performance improvement over
non-real-time policies in terms of minimizing the
transaction loss ratio.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glenn:1991:IMP,
author = "R. R. Glenn and D. V. Pryor",
title = "Instrumentation for a massively parallel {MIMD}
application",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "208--209",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107993",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an application implemented on a
simulated machine called Horizon. One purpose of this
study is to investigate some of the features of a
possible future machine (or class of machines) with a
view toward deciding, early on in the research cycle,
where problems may come up, what features should be
added or strengthened, and what proposed features seem
to be unnecessary. Another purpose is to learn more
about how to program, instrument and debug a shared
memory, massively parallel MIMD computer, and to begin
to answer some of the questions: What tools does a
programmer need to debug this type of machine? How can
a programmer know if the machine is performing well?
How can bottlenecks be identified? How can the massive
amount of instrumentation information be condensed and
presented to a user in a way that makes sense?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldberg:1991:MMD,
author = "Aaron Goldberg and John Hennessy",
title = "{MTOOL}: a method for detecting memory bottlenecks",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "210--211",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107994",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new, relatively inexpensive
method for detecting regions (e.g. loops and
procedures) in a program where the memory hierarchy is
performing poorly. By observing where actual measured
execution time differs from the time predicted given a
perfect memory system, we can isolate memory
bottlenecks. MTOOL, an implementation of the approach
aimed at applications programs running on MIPS-chip
based workstations is described and results for some of
the Perfect Club and SPEC benchmarks are summarized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:1991:ISS,
author = "Yul H. Kim and Mark D. Hill and David A. Wood",
title = "Implementing stack simulation for highly-associative
memories",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "212--213",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107995",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Prior to this work, all implementations of stack
simulation [MGS70] required more than linear time to
process an address trace. In particular these
implementations are often slow for highly-associative
memories and traces with poor locality, as can be found
in simulations of file systems. We describe a new
implementation of stack simulation where the referenced
block and its stack distance are found using a hash
table rather than by traversing the stack. The key to
this implementation is that designers are rarely
interested in a continuum of memory sizes, but instead
desire metrics for only a discrete set of alternatives
(e.g., powers of two). Our experimental evaluation
shows the run-time of the new implementation to be
linear in address trace length and independent of trace
locality. Kim, et al., [KHW91] present the results of
this research in more detail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Newman:1991:PAC,
author = "Robb Newman",
title = "Performance analysis case study (abstract):
application of experimental design \& statistical data
analysis techniques",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "214--215",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107996",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common requirement of computer vendors' competitive
performance analysis departments is to measure and
report on the performance characteristics of another
vendor's system. In many cases the amount of prior
knowledge concerning the competitor's system is limited
to sales brochures and non-technical publications.
Availability of the system for benchmarking is minimal;
there is little choice concerning memory and I/O
configurations; and time to complete the project is
short. A project of this nature is not, however, unique
to computer vendors. Many users of computer systems
that want to better understand a system's performance
characteristics before deciding on a purchase, are also
faced with similar restrictions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Park:1991:MPB,
author = "Arvin Park and Jeffrey C. Becker",
title = "Measurements of the paging behavior of {UNIX}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "216--217",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107997",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper analyzes measurements of paging activity
from several different versions of UNIX. We set out to
characterize paging activity by first taking
measurements of it, and then writing programs to
analyze it. In doing so, we were interested in
answering several questions:\par
1. What is the magnitude of paging traffic and how much
of I/O system activity is paging related?\par
2. What are the characteristics of paging activity, and
how can paging system implementations be tuned to match
them?\par
3. How does paging activity vary across different
machines, operating systems, and job mixes?\par
4. How well does paging activity correlate with system
load average and number of users?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pasquale:1991:SDW,
author = "Joseph Pasquale and Barbara Bittel and Daniel
Kraiman",
title = "A static and dynamic workload characterization study
of the {San Diego Supercomputer Center Cray X-MP}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "218--219",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107998",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The San Diego Supercomputer Center is one of four NSF
sponsored national supercomputer centers. Up until
January of 1990, its workhorse was a Cray X-MP, which
served 2700 researchers from 170 institutions, spanning
44 states. In order to better understand how this
supercomputer was utilized by its diverse community of
users, we undertook a workload characterization study
of the Cray X-MP. The goals of our study were twofold.
First, we wished to characterize the workload at both
the functional and resource levels. The functional
level represents the user point of view: what types of
programs users are running on the system. The resource
level represents the system point of view: how the
system's resources (CPU, memory, I/O bandwidth) are
being used. Second, we wanted to see how the workload
changed over an average weekday. Thus, we conducted a
static characterization to understand its global
attributes over the entire measurement period, as well
as a dynamic workload characterization to understand
the time behavior of the workload over a weekday
cycle.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pu:1991:EMA,
author = "Calton Pu and Frederick Korz and Robert C. Lehman",
title = "An experiment on measuring application performance
over the {Internet}",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "220--221",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.107999",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of wide area networks (WANs) such as the
Internet is growing at a tremendous rate. Such networks
hold great promise for new types of distributed
applications, which will be widely distributed, highly
replicated, intensely interactive, and adaptive to many
types of network conditions. Developing such
applications will require a solid understanding of the
performance and availability characteristics of WANs as
they evolve. The ability to measure the effect of these
conditions will, for example, be important for
large-volume applications such as digital libraries,
and for near-real-time applications such as
collaborative research and teleconferencing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:1991:PBB,
author = "Myung K. Yang and Chita R. Das",
title = "A parallel branch-and-bound algorithm for {MIN}-based
multiprocessors",
journal = j-SIGMETRICS,
volume = "19",
number = "1",
pages = "222--223",
month = may,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/107972.108000",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:11:17 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A parallel `Decomposite Best-First' search
Branch-and-Bound algorithm ({\em pdbsbb\/}) for
MIN-based multiprocessor systems is proposed in this
paper. A conflict free mapping scheme, known as {\em
step-by-step spread}, is used to map the algorithm
efficiently onto a MIN-based system for reducing
communication overhead. It is shown that the proposed
algorithm provides better speed-up than other reported
schemes when communication overhead is taken into
consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epema:1991:BRC,
author = "Dick H. J. Epema",
title = "Book Review: {`Computer and Communication Systems
Performance Modelling' by Peter J. B. King (Prentice
Hall, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "4--5",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045494",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book offers a simple and short introduction to
the theory of queueing models of computer and
communication systems. It consists of 14 chapters.
After the first, which gives the motivation and a
feeling for the subject (among other things, by an
informal proof and some simple illustrations of
Little's theorem), there are two preparatory chapters
on probability theory and stochastic processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Al-Jaar:1991:BRA,
author = "Robert Y. Al-Jaar",
title = "Book review: {`The Art of Computer Systems Performance
Analysis: Techniques for Experimental Design,
Measurement, Simulation, and Modeling' by Raj Jain
(John Wiley \& Sons 1991)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "5--11",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045495",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the preface to {\em The Art of Computer Systems
Performance Analysis: Techniques for Experimental
Design, Measurement, Simulation, and Modeling}, Raj
Jain discusses the intended audience and the goals of
the book, which are to:$ \bullet $ Provide computer
professionals simple and straightforward performance
analysis techniques in a comprehensive textbook. $
\bullet $ Give basic modeling, simulation, measurement,
experimental design, and statistical analysis
background. $ \bullet $ Emphasize and integrate the
modeling and measurement aspects of performance
analysis. $ \bullet $ Discuss common mistakes and games
in performance analysis studies. $ \bullet $ Illustrate
the presented techniques using examples and case
studies from the field of computer systems. $ \bullet $
Summarize key techniques and results in `boxes'. $
\bullet $ Organize chapters in 45-minute lectures and
include appropriate exercises.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPd,
author = "David Finkel",
title = "Brief review: {`Probability, Statistics and Queueing
Theory with Computer Science Applications,' Second
Edition by Arnold O. Allen (Academic Press 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "11--12",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045496",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a revision of the classic probability and
statistics text originally written in 1978. Like the
first edition, this book is designed for an upper-level
undergraduate course in probability and statistics with
computer science applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRC,
author = "David Finkel",
title = "Brief review: {`Computer Networks \& Systems: Queueing
Theory and Performance Evaluation' by Thomas Robertazzi
(Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "12--12",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045498",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is the proceedings of the Workshop on
Parallel Computer Systems: Performance Instrumentation
and Visualization held in Santa Fe, New Mexico in May,
1989. Some of the sixteen papers included here discuss
research projects designed primarily to collect
performance data from distributed and parallel systems.
Other papers discuss modern visualization techniques in
general, or report on projects to put these powerful
techniques to work on parallel computer system
performance data, to make this data easier to
understand and to use to improve system or program
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRQ,
author = "David Finkel",
title = "Brief review: {``Queueing Networks --- Exact
Computational Algorithms: A Unified Theory Based on
Decomposition and Aggregation'' by Adrian E. Conway and
Nicholas D. Georganas (MIT Press 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "12--12",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unlike the other, more specialized, books given brief
reviews in this issue, this book would be an
appropriate text for an introductory graduate course in
performance evaluation. The book presumes a knowledge
of probability theory, which is reviewed in an
appendix. There is a chapter on single queueing
systems, which covers the M/M/1 queueing system in
detail, and a number of related models. In particular,
the author has a section on reversibility and one on
the M/G/1 queue.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRPe,
author = "David Finkel",
title = "Brief review: {`Performance Instrumentation \&
Visualization' by Margaret Simmons and Rebecca Koskela
(Addison-Wesley \& ACM Press, 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "12--13",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045499",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book presents a thorough discussion of exact
algorithms for product-form queueing networks. The
authors discuss the well-known Convolution Algorithm,
and Mean Value Analysis (MVA), as well as some more
recent algorithms: Recursion by Chain (RECAL), Mean
Value Analysis by Chain (MVAC), and the Distribution
Analysis by Chain (DAC).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:BRS,
author = "David Finkel",
title = "Brief review: {`Stochastic Analysis of Computer and
Communication Systems', Ed. by H. Takagi (Elsevier
Science Publishers B.V. 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "13--13",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045500",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is the first volume in a series of books,
designed to give an introduction to research-level
topics in queueing theory applicable to performance
evaluation. As such, it presumes as background a
careful mathematical study of introductory queueing
theory topics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Frankel:1991:BRQ,
author = "David Frankel",
title = "Brief review: {`Queueing Analysis: A Foundation of
Performance Evaluation. Volume 1: Vacation and Priority
Systems, Part 1' by H. Takagi (North-Holland, 1991)}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "13--13",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.1045501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a collection of articles, written especially
for this publication, designed to show the rich variety
of stochastic models applicable to studying the
performance of computer and communications systems.
There are a total of twenty articles, divided into four
sections. The first section, Stochastic Processes,
includes articles presenting general stochastic
process models applied to computer and communications
system modeling. The second section, Queues, presents
queueing theoretic models which are applicable to
performance modeling, although these articles
concentrate on the queueing models themselves. The
final two sections, Computer Systems and Communication
Systems, present applications of analytic modeling to
these kinds of systems. The final article is an
extensive bibliography compiled by Dr. Takagi of works
on performance evaluation. There are separate sections
for books, special issues of journals, conference
proceedings, and survey and tutorial articles.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ames:1991:CTP,
author = "D. Ames and D. Gibson and B. Troy",
title = "Composite theoretical performance",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "24--29",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Export controls require that computer systems,
specifically Digital Central Processing units, be
characterized as to performance. Absolute performance
measurement is not required, rather a very wide range
of CPUs, from micros to supercomputers, must be rank
ordered. Ranking is based on a synthetic
characterization and is influenced by the design
details of the particular processor that make it useful
for one or more strategic applications. This paper
describes the strategic export control concerns, the
rationale involved in the choice of a metric, the
technical considerations, and the elements included in
the CTP metric.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Christianson:1991:ALE,
author = "Bruce Christianson",
title = "{Amdahl's Law} and end of system design",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "30--32",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gene Amdahl has persuasively argued that there are
severe technology-independent limits on the performance
gains which can be achieved by using massively parallel
processing. This conclusion (popularly called {\em
Amdahl's Law\/}) has been supported by a number of
different arguments [1], advanced in the context of
vector processing and also in the context of the
hypercube architecture.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1991:OWB,
author = "David Finkel and Robert E. Kinicki and Jonas A.
Lehmann",
title = "An overview of the {WPI Benchmark Suite}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "33--35",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The November 1990 issue of Performance Evaluation
Review included a number of articles and opinions on
the merits of commercial bench-mark suites. In the
spirit of continuing this discussion, we present here a
brief introduction to the WPI Benchmark Suite.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Becker:1991:APB,
author = "Jeffrey C. Becker and Arvin Park",
title = "Analysis of the paging behavior of {UNIX}",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "36--43",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the paging behavior of several different
versions of UNIX by recording traces of paging activity
over time and writing programs to analyze the traces.
We recorded periodic totals of paging events instead of
individual paging events themselves. Our analysis shows
that paging activity accounts for between 15\% and 21\%
of all disk block accesses. Average paging system
traffic is very low. The paging system is idle most of
the time and paging activity occurs in large periodic
bursts. Despite the fact that it is often overlooked,
swap related paging accounts for a significant portion
of all paging activity (between 24\% and 71\%).
Furthermore, the behavior of swap-related paging
differs greatly from the well-studied behavior of
demand paging. The ratio of pages read to pages written
(which varies between 0.85 and 1.9) is lower than
typical read to write ratios for file system accesses.
Paging activity is loosely correlated with load average
or number of users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fateyev:1991:CEA,
author = "A. E. Fateyev and S. M. Porotskiy and V. I. Drujinin",
title = "Comparative evaluation of approximate methods for
modelling of network systems",
journal = j-SIGMETRICS,
volume = "19",
number = "2",
pages = "44--48",
month = aug,
year = "1991",
CODEN = "????",
DOI = "https://doi.org/10.1145/122564.122569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The paper discloses the results of a comparative
evaluation of several approximate methods of queueing
network analysis concerning their accuracy, fields of
validity and computational consumptions; the comparison
is being carried out with varying values of network
parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nangia:1992:BRP,
author = "Ashvini Nangia",
title = "Book Review: {`Performance Analysis of Transaction
Processing Systems' by Wilbur H. Highleyman (Prentice
Hall, 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "9--11",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045110",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book deals with issues related to performance
analysis of a special class of real-time computing
systems called transaction processing systems. Even
though the book primarily discusses OLTP (On-line
Transaction Processing) architectures, it provides an
excellent text for performance evaluation of operating
systems and file systems. In many cases the author
discusses the effect of multiple processors on
performance of the overall system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meng:1992:BRC,
author = "Xiannong Meng",
title = "Book Review: {`Computer Networks and Systems: Queueing
Theory and Performance Evaluation' by Thomas G.
Robertazzi (Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "11--12",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045111",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This is a book intended for first-year graduate level
courses in statistical performance evaluation. The book
can be used for both network performance and computer
system performance courses although the emphasis is on
computer networks. It assumes a background in computer
networks (first graduate course). Readers should have
solid mathematics background if they use this book as
self-study material. The book does provide a very brief
review on probability theory, but this is not detailed
enough if the readers did not have probability
before.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRS,
author = "David Finkel",
title = "Brief review: {`Stochastic Modeling and the Theory of
Queues' by Ronald W. Wolff (Prentice-Hall, 1989)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "12--12",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045490",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is intended for a first-year graduate course
in stochastic processes, and queueing theory. It is
mathematically rigorous, and requires a substantial
background in probability theory. The first chapter
provides a review of the necessary topics from
probability theory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRMa,
author = "David Finkel",
title = "Brief review: {`Markovian Queues' by O. P. Sharma
(Ellis Horwood Publishers 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "12--13",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045491",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This slim monograph presents a novel approach to
understanding the behavior of the M/M/1 queue and of
other Markovian queues with finite capacity. The basic
idea is to construct a two-dimensional model of the
queueing system, where the two dimensions represent the
number of customers who have arrived to the system, and
the number of customers who have departed. A
closed-form solution is then obtained for this model,
from which various performance measures of interest can
be derived. The author also presents transient analysis
of certain Markovian queues based on this same
approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRB,
author = "David Finkel",
title = "Brief review: {`The Benchmark Handbook: Database and
Transaction Processing Systems,' Ed. by Jim Gray
(Morgan Kaufmann Publishers, Inc., 1991)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "13--13",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045493",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is unique in the performance literature, and
provides a valuable service to those interested in
benchmarking database and transaction processing
systems, or who are interested in benchmarking in
general. The Introduction was written by the editor,
and explains the structure of the book, and has a
discussion of benchmarking in general, explaining the
need for benchmarks, design criteria for benchmarks,
and an overview of the benchmarks presented in the
book.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1992:BRMb,
author = "David Finkel",
title = "Brief review: {`Modeling and Analysis of Local Area
Networks' by Paul J. Fortier and George Desrochers
(CRC Press, 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "13--13",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.1045492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "According to the author, this book is intended for
network researchers, users, designers and evaluators,
to enable them to make informed decisions about network
design and configuration. Except for the lack of
exercises, this book could also be used as a textbook
in this area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berry:1992:SWC,
author = "Michael W. Berry",
title = "Scientific workload characterization by loop-based
analyses",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "17--29",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130952",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of scientific and engineering benchmarks have
emerged during the 1980's. Each of these benchmarks has
a different origin, methodology and interpretation.
This report presents a case study of two current
scientific benchmarks and includes a comparison of them
based on their instruction mixes as measured by the
CRAY X-MP {\em hardware performance monitor\/} (hpm).
This particular case study was conducted by graduate
students in a Performance Evaluation course taught
during Spring Quarter 1991 in the Department of
Computer and Information Sciences at the University of
Alabama at Birmingham. Students analyzed the dominant
loops of the application-based Perfect Benchmarks and
noted (where applicable) significant performance
comparisons with the loop-based Livermore Fortran
Kernels. Whether or not any collection of kernel or
loop-based benchmarks can effectively predict the
performance of more sophisticated scientific
application programs is not clear. This case study does
reveal, however, the types of loops which are most
prevalent in codes from various scientific applications
and what their impact is on the overall performance of
these applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Council:1992:CTR,
author = "{Transaction Processing Performance Council}",
title = "Complete {TPC} results (as of 9/30/91)",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "32--35",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130953",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Deike-Glindemann:1992:SPE,
author = "Hartmut Deike-Glindemann",
title = "{SIQUEUE-PET}: an environment for queueing network
modelling",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "36--44",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130954",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Developing models for performance evaluation of
computer systems, logistic systems etc. often is a
complex task. The effort can be considerably reduced if
appropriate software tools are available. In many cases
queueing network models are suitable for solving the
problem to a sufficient degree of accuracy. SIQUEUE-PET
provides an environment for construction, evaluation
and result representation of such models. The user is
assisted through a graphical interface for model
construction as well as for result representation. The
availability of a support for object management
provides further alleviation in the modelling
activities. This contribution gives a brief overview of
the main features of SIQUEUE-PET. From the viewpoint of
modelling style, the availability of aggregation
techniques and the capability of processing
hierarchically structured models is to be emphasized.
An example is included for illustrative purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dujmovic:1992:UMS,
author = "Jozo J. Dujmovi{\'c}",
title = "The use of multiple-subscripted arrays in benchmark
programs",
journal = j-SIGMETRICS,
volume = "19",
number = "3",
pages = "45--48",
month = feb,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/130951.130955",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the effects of using
multiple-subscripted arrays in benchmark programs. We
identify and exemplify typical problems caused by
multiple-subscripted arrays and show why their usage in
benchmarking should be strictly controlled and
frequently restricted. Multiple-subscripted arrays can
be considered harmful in the case of general purpose
processor-bound benchmarks. On the other hand, the
multiple-subscripted arrays are shown to be suitable
for measuring the optimizing features of compilers,
especially for RISC machines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pooley:1992:BRC,
author = "Rob Pooley",
title = "Book Reviews: {`Computer and Communication Systems
Performance Modelling' by Peter J. B. King (Prentice
Hall 1990)}",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "13--14",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.1044850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book offers a simple and short introduction to
the theory of queueing models of computer and
communication systems. It consists of 14 chapters.
After the first, which gives the motivation and a
feeling for the subject (among other things, by an
informal proof and some simple illustrations of
Little's theorem), there are two preparatory chapters
on probability theory and stochastic processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hac:1992:MDF,
author = "Anna Ha{\'c}",
title = "Modeling distributed file systems",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "22--27",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.140729",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes different methods and techniques
used to model, analyze, evaluate and implement
distributed file systems. Distributed file systems are
characterized by the distributed system hardware and
software architecture, in which they are implemented as
well as by the file systems' functions. In addition,
distributed file system performance depends on the load
executed in the system. Modeling and analysis of
distributed file systems requires new methods to
approximate complexity of the system and to provide a
useful solution. The complexity of the distributed file
system is reflected in the possible placement of the
files, file replication, and migration of files and
processes. The synchronization mechanisms are needed to
control file access. File sharing involves load sharing
in distributed environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Molloy:1992:ANB,
author = "Michael K. Molloy",
title = "Anatomy of the {NHFSSTONES} benchmarks",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "28--39",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.140731",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is intended to provide some insight into
the NHFSSTONES benchmark operations and how one may
interpret the results. This white paper covers the
reasons for the benchmarks, the basics of their
operation, the differences between the original
benchmark and its descendants, and finally some
instructions on how to run the benchmark.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keown:1992:RTP,
author = "William F. {Keown, Jr.} and Philip {Koopman, Jr.} and
Aaron Collins",
title = "Real-time performance of the {HARRIS RTX 2000} stack
architecture versus the {Sun 4 SPARC} and the {Sun 3
M68020} architectures with a proposed real-time
performance benchmark",
journal = j-SIGMETRICS,
volume = "19",
number = "4",
pages = "40--48",
month = may,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/140728.140733",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:12:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This study compares a stack machine, the Harris RTX
2000, a RISC machine, the Sun 4/SPARC, and a CISC
machine, the Sun3/M68020 for real-time applications. An
attempt is made to compare the generic features of each
machine which are characteristic of their architectural
classes as opposed to being characteristic of the
individual machine only. Performance is compared based
on execution of the Stanford Integer Benchmark series
and on interrupt response characteristics. A simple
Real-Time Performance BenchMark which integrates raw
compute power and interrupt response is proposed, then
used to estimate the real-time performance of the
machines. It is shown that the RTX 2000 outperforms the
others for applications which have a very large number
of interrupts per second, confirming that stack
architectures should perform well in real-time
applications such as high-speed computer communication
systems. For less interrupt intensive applications, the
Sun 4 SPARC performs better.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martonosi:1992:MAM,
author = "Margaret Martonosi and Anoop Gupta and Thomas
Anderson",
title = "{MemSpy}: analyzing memory system bottlenecks in
programs",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "1--12",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To cope with the increasing difference between
processor and main memory speeds, modern computer
systems use deep memory hierarchies. In the presence of
such hierarchies, the performance attained by an
application is largely determined by its memory
reference behavior --- if most references hit in the
cache, the performance is significantly higher than if
most references have to go to main memory. Frequently,
it is possible for the programmer to restructure the
data or code to achieve better memory reference
behavior. Unfortunately, most existing performance
debugging tools do not assist the programmer in this
component of the overall performance tuning task. This
paper describes MemSpy, a prototype tool that helps
programmers identify and fix memory bottlenecks in both
sequential and parallel programs. A key aspect of
MemSpy is that it introduces the notion of data
oriented, in addition to code oriented, performance
tuning. Thus, for both source level code objects and
data objects, MemSpy provides information such as cache
miss rates, causes of cache misses, and in
multiprocessors, information on cache invalidations and
local versus remote memory misses. MemSpy also
introduces a concise matrix presentation to allow
programmers to view both code and data oriented
statistics at the same time. This paper presents design
and implementation issues for MemSpy, and gives a
detailed case study using MemSpy to tune a parallel
sparse matrix application. It shows how MemSpy helps
pinpoint memory system bottlenecks, such as poor
spatial locality and interference among data
structures, and suggests paths for improvement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Whalley:1992:FIC,
author = "David B. Whalley",
title = "Fast instruction cache performance evaluation using
compile-time analysis",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "13--22",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache simulation; instruction cache; trace analysis;
trace generation",
}
@Article{LaRowe:1992:ADP,
author = "Richard P. {LaRowe, Jr.} and Mark A. Holliday and
Carla Schlatter Ellis",
title = "An analysis of dynamic page placement on a {NUMA}
multiprocessor",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "23--34",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The class of NUMA (nonuniform memory access time)
shared memory architectures is becoming increasingly
important with the desire for larger scale
multiprocessors. In such machines, the placement and
movement of code and data are crucial to performance.
The operating system can play a role in managing
placement through the policies and mechanisms of the
virtual memory subsystem. In this paper, we develop an
analytic model of memory system performance of a
Local/Remote NUMA architecture based on approximate
mean-value analysis techniques. The model assumes that
a simple workload model based on a few parameters can
often provide insight into the general behavior of real
applications. The model is validated against
experimental data obtained with the DUnX operating
system kernel for the BBN GP1000 while running a
synthetic workload. The results of this validation show
that in general, model predictions are quite good,
though in some cases the model fails to include the
effect of unexpected behaviors in the implementation.
Experiments investigate the effectiveness of dynamic
multiple-copy page placement. We investigate the cost
of incorrect policy decisions by introducing different
percentages of policy error and measuring their effect
on performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicola:1992:AGC,
author = "Victor F. Nicola and Asit Dan and Daniel M. Dias",
title = "Analysis of the generalized clock buffer replacement
scheme for database transaction processing",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "35--46",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133084",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The CLOCK algorithm is a popular buffer replacement
algorithm because of its simplicity and its ability to
approximate the performance of the Least Recently Used
(LRU) replacement policy. The Generalized Clock
(GCLOCK) buffer replacement policy uses a circular
buffer and a weight associated with each page brought
in buffer to decide on which page to replace. We
develop an approximate analysis for the GCLOCK policy
under the Independent Reference Model (IRM) that
applies to many database transaction processing
workloads. We validate the analysis for various
workloads with data access skew. Comparison with
simulations shows that in all cases examined the error
is extremely small (less than 1\%). To show the
usefulness of the model we apply it to a Transaction
Processing Council benchmark A (TPC-A) like workload.
If knowledge of the different data partitions in this
workload is assumed, the analysis shows that, with
appropriate choice of weights, the performance of the
GCLOCK algorithm can be better than the LRU policy.
Performance very close to that for optimal (static)
buffer allocation can be achieved by assigning
sufficiently high weights, and can be implemented with
a reasonably low overhead. Finally, we outline how the
model can be extended to capture the effect of page
invalidation in a multinode system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:1992:CCC,
author = "S. C. Borst and O. J. Boxma and M. B. Comb{\'e}",
title = "Collection of customers: a correlated {M/G/1} queue",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "47--59",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jacquet:1992:STD,
author = "Philippe Jacquet",
title = "Subexponential tail distribution in {LaPalice}
queues",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "60--69",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1992:RBC,
author = "Duan-Shin Lee and Bhaskar Sengupta",
title = "A reservation based cyclic server queue with limited
service",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "70--77",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine a problem which is an
extension of the limited service in a queueing system
with a cyclic server. In this service mechanism, each
queue, after receiving service in cycle $j$, makes a
reservation for its service requirement in cycle $ j +
1$. In this paper, we consider symmetric case only,
i.e., the arrival rates to all the queues are the same.
The main contribution to queueing theory is that we
propose an approximation for the queue length and
sojourn-time distributions for this discipline. Most
approximate studies on cyclic queues, which have been
considered before, examine the means only. Our method
is an iterative one, which we prove to be convergent by
using stochastic dominance arguments. We examine the
performance of our algorithm by comparing it to
simulations and show that the results are very good.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramakrishnan:1992:AFT,
author = "K. K. Ramakrishnan and Prabuddha Biswas and
Ramakrishna Karedla",
title = "Analysis of file {I/O} traces in commercial computing
environments",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "78--90",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133090",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Improving the performance of the file system is
becoming increasingly important to alleviate the effect
of I/O bottlenecks in computer systems. To design
changes to an existing file system or to architect a
new file system it is important to understand current
usage patterns. In this paper we analyze file I/O
traces of several existing production computer systems
to understand file access behavior. Our analysis
suggests that a relatively small percentage of the
files are active. The amount of total data active is
also quite small for interactive environments. An
average file encounters a relatively small number of
file opens while receiving an order of magnitude larger
number of reads to it. An average process opens quite a
large number of files over a typical prime time period.
What is more significant is that the effect of outliers
on many of the characteristics we studied is dominant.
A relatively small number of processes dominate the
activity, and a very small number of files receive most
of these operations. In addition, we provide a
comprehensive analysis of the dynamic sharing of files
in each of these environments, addressing both the
simultaneous and sequential sharing aspects, and the
activity to these shared files. We observe that
although only a third of the active files are
sequentially shared, they receive a very large
proportion of the total operations. We analyze the
traces from a given environment across different
lengths of time, such as one hour, three hour and whole
work-day intervals and do this for 3 different
environments. This gives us an idea of the shortest
length of the trace needed to have confidence in the
estimation of the parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sandhu:1992:CBF,
author = "Harjinder S. Sandhu and Songnian Zhou",
title = "Cluster-based file replication in large-scale
distributed systems",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "91--102",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing need for data sharing in large-scale
distributed systems may place a heavy burden on
critical resources such as file servers and networks.
Our examination of the workload in one large commercial
engineering environment shows that wide-spread sharing
of unstable files among tens to hundreds of users is
common. Traditional client-based file caching
techniques are not scalable in such environments. We
propose Frolic, a scheme for cluster-based file
replication in large-scale distributed file systems. A
cluster is a group of workstations and one or more file
servers on a local area network. Large distributed
systems may have tens or hundreds of clusters connected
by a backbone network. By dynamically creating and
maintaining replicas of shared files on the file
servers in the clusters using those files, we
effectively reduce reliance on central servers
supporting such files, as well as reduce the distances
between the accessing sites and data. We propose and
study algorithms for the two main issues in Frolic, (1)
locating a valid file replica, and (2) maintaining
consistency among replicas. Our simulation experiments
using a statistical workload model based upon
measurement data and real workload characteristics show
that cluster-based file replication can significantly
reduce file access delays and server and backbone
network utilizations in large-scale distributed systems
over a wide range of workload conditions. The workload
characteristics most critical to replication
performance are: the size of shared files, the number
of clusters that modify a file, and the number of
consecutive accesses to files from a particular
cluster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merchant:1992:PAD,
author = "Arif Merchant and Kun-Lung Wu and Philip S. Yu and
Ming-Syan Chen",
title = "Performance analysis of dynamic finite versioning for
concurrent transaction and query processing",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "103--114",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we analyze the performance of dynamic
finite versioning (DFV) schemes for concurrent
transaction and query processing, where a finite number
of consistent snapshots can be derived for query
access. We develop analytical models based on a renewal
process approximation to evaluate the performance of
DFV using $ M \geq 2 $ snapshots. The storage overhead
and obsolescence faced by queries are measured.
Simulation is used to validate the analytical models
and to evaluate the trade-offs between various
strategies for advancing snapshots when $ M > 2 $. The
results show that (1) the analytical models match
closely with simulation; (2) both the storage overhead
and obsolescence are sensitive to the
snapshot-advancing strategies, especially for $ M > 2 $
snapshots; and (3) generally speaking, increasing the
number of snapshots demonstrates a trade-off between
storage overhead and query obsolescence. For cases with
skewed access or low update rates, a moderate increase
in the number of snapshots beyond 2 can substantially
reduce the obsolescence, while the storage overhead may
increase only slightly, or even decrease in some cases.
Moreover, for very low update rates, a large number of
snapshots can be used to reduce the obsolescence to
almost zero without increasing the storage overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:1992:PAL,
author = "Alexander Thomasian",
title = "Performance analysis of locking policies with limited
wait depth",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "115--127",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133095",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a transaction processing system
with the standard two-phase locking (2PL) concurrency
control (CC) method (with the general waiting policy
upon a lock conflict) may be degraded significantly due
to transaction blocking in a high lock contention
environment. In the limit this effect leads to the
thrashing phenomenon, i.e., the majority of the
transactions in the system become blocked. Limiting the
wait depth of blocked transactions is an effective
method to increase the number of active transactions in
the system and to prevent thrashing, but this is at the
cost of additional processing due to transaction
restarts. The no-waiting (or immediate restart) policy
limits the wait-depth to zero, while cautious waiting
and the running priority policies use different methods
to limit the wait depth to one. A variant of the wait
depth limited (WDL) policy [8] also limits the wait
depth to one, while attempting to minimize the wasted
processing incurred by transaction aborts. A unified
methodology to analyze the performance of the 2PL CC
method with limited wait depth policies in a system
with multiple transaction classes is described in this
paper. The analysis is based on Markov chains
representing the execution steps of each transaction in
isolation, but as affected by hardware resource and
data contention with other transactions in the system.
Since the transition rates of the Markov chain are not
known a priori, an iterative solution method is
developed, which is then applied to the running
priority and WDL policies. Simulation is used for
validating the accuracy of the approximate analytic
solutions. Of interest are the conservation laws
governing the rate at which locks are transferred among
transactions, which can be used to verify the
correctness of the analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kurose:1992:CPS,
author = "Jim Kurose",
title = "On computing per-session performance bounds in
high-speed multi-hop computer networks",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "128--139",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133097",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a technique for computing upper bounds on
the distribution of individual per-session performance
measures such as delay and buffer occupancy for
networks in which sessions may be routed over several
``hops.'' Our approach is based on first stochastically
bounding the distribution of the number of packets (or
cells) which can be generated by each traffic source
over various lengths of time and then ``pushing'' these
bounds (which are then shown to hold over new time
interval lengths at various network queues) through the
network on a per-session basis. Session performance
bounds can then be computed once the stochastic bounds
on the arrival process have been characterized for each
session at all network nodes. A numerical example is
presented and the resulting distributional bounds
compared with simulation as well as with a point-valued
worst-case performance bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lui:1992:AAB,
author = "John C. S. Lui and Richard R. Muntz",
title = "Algorithmic approach to bounding the mean response
time of a minimum expected delay routing system",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "140--151",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133099",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an algorithmic approach to
bounding the mean response time of a multi-server
system in which the minimum expected delay routing
policy is used, i.e., an arriving job will join the
queue which has the minimal expected value of
unfinished work. We assume the queueing system to have
$K$ servers, each with an infinite capacity queue. The
arrival process is Poisson with parameter $ \lambda $,
and the service time of server $i$ is exponentially
distributed with mean $ 1 / \mu_i, 1 \leq i \leq K$.
The computation algorithm we present allows one to
tradeoff accuracy and computational cost. Upper and
lower bounds on the expected response time and expected
number of customers are computed; the spread between
the bounds can be reduced with additional space and
time complexity. Examples are presented which
illustrate the excellent relative accuracy attainable
with relatively little computation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:1992:SSE,
author = "Edmundo {de Souza e Silva} and Pedro Meji{\'a} Ochoa",
title = "State space exploration in {Markov} models",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "152--166",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133100",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance and dependability analysis is usually
based on Markov models. One of the main problems faced
by the analyst is the large state space cardinality of
the Markov chain associated with the model, which
precludes not only the model solution, but also the
generation of the transition rate matrix. However, in
many real system models, most of the probability mass
is concentrated in a small number of states in
comparison with the whole state space. Therefore,
performability measures may be accurately evaluated
from these ``high probable'' states. In this paper, we
present an algorithm to generate the most probable
states that is more efficient than previous algorithms
in the literature. We also address the problem of
calculating measures of interest and show how bounds on
some measures can be efficiently calculated.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Owicki:1992:FPA,
author = "Susan S. Owicki and Anna R. Karlin",
title = "Factors in the performance of the {AN1} computer
network",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "167--180",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133102",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "AN1 (formerly known as Autonet) is a local area
network composed of crossbar switches interconnected by
100Mbit/second, full-duplex links. In this paper, we
evaluate the performance impact of certain choices in
the AN1 design. These include the use of FIFO input
buffering in the crossbar switch, the
deadlock-avoidance mechanism, cut-through routing,
back-pressure for flow control, and multi-path routing.
AN1's performance goals were to provide low latency and
high bandwidth in a lightly loaded network. In this it
is successful. Under heavy load, the most serious
impediment to good performance is the use of FIFO input
buffers. The deadlock-avoidance technique has an
adverse effect on the performance of some topologies,
but it seems to be the best alternative, given the
goals and constraints of the AN1 design. Cut-through
switching performs well relative to store-and-forward
switching, even under heavy load. Back-pressure deals
adequately with congestion in a lightly-loaded network;
under moderate load, performance is acceptable when
coupled with end-to-end flow control for bursts.
Multi-path routing successfully exploits redundant
paths between hosts to improve performance in the face
of congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shankar:1992:PCR,
author = "A. Udaya Shankar and Cengiz Alaettino{\u{g}}lu and
Ibrahim Matta and Klaudia Dussa-Zieger",
title = "Performance comparison of routing protocols using
{MaRS}: distance-vector versus link-state",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "181--192",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133103",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There are two approaches to adaptive routing protocols
for wide-area store-and-forward networks:
distance-vector and link-state. Distance-vector
algorithms use $ O(N \times e) $ storage at each node,
whereas link-state algorithms use $ O(N^2) $, where $N$
is the number of nodes in the network and $e$ is the
average degree of a node. The ARPANET started with a
distance-vector algorithm (Distributed Bellman-Ford),
but because of long-lived loops, changed to a
link-state algorithm (SPF). We show, using a recently
developed network simulator, MaRS, that a newly
proposed distance-vector algorithm (ExBF) performs as
well as SPF. This suggests that distance-vector
algorithms are appropriate for very large wide-area
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Altman:1992:CLC,
author = "Eitan Altman and Philippe Nain",
title = "Closed-loop control with delayed information",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "193--204",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133106",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The theory of Markov Control Model with Perfect State
Information (MCM-PSI) requires that the current state
of the system is known to the decision maker at
decision instants. Otherwise, one speaks of Markov
Control Model with Imperfect State Information
(MCM-ISI). In this article, we introduce a new class of
MCM-ISI, where the information on the state of the
system is delayed. Such an information structure is
encountered, for instance, in high-speed data networks.
In the first part of this article, we show that by
enlarging the state space so as to include the last
known state as well as all the decisions made during
the travel time of the information, we may reduce a
MCM-ISI to a MCM-PSI. In the second part of this paper,
this result is applied to a flow control problem.
Considered is a discrete time queueing model with
Bernoulli arrivals and geometric services, where the
intensity of the arrival stream is controlled. At the
beginning of slot $ t + 1 $, $ t = 0, 1, 2, \ldots{} $,
the decision maker has to select the probability of
having one arrival in the current time slot from the set
$ \{ p_1, p_2 \} $, $ 0 \leq p_2 < p_1 \leq 1 $, only on
the basis of the
to optimize a discounted throughput/delay criterion. We
show that there exists an optimal policy of a threshold
type, where the threshold is seen to depend on the last
action.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Merchant:1992:AMC,
author = "Arif Merchant",
title = "Analytical models of combining {Banyan} networks",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "205--212",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133107",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present in this paper an analytical model of a
multistage combining Banyan network with output
buffered switches, in hot-spot traffic. In a combining
network, packets bound for the same destination are
combined into one if they meet at a switch; this
alleviates the problem of tree-saturation caused by
hot-spot traffic. We model the flow processes in the
network as Markov chains and recursively approximate
the departure processes of each stage of the network in
terms of the departure processes of the preceding
stage. This model is used to predict the throughput of
the combining network, and comparison with simulation
results shows the prediction to be accurate. A modified
combining scheme based on low priorities for hot
packets is proposed and analyzed. It is shown that this
scheme yields substantial improvements in throughput
over the standard combining scheme.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akyildiz:1992:PAT,
author = "Ian F. Akyildiz and Liang Chen and Samir R. Das and
Richard M. Fujimoto and Richard F. Serfozo",
title = "Performance analysis of ``{Time Warp}'' with limited
memory",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "213--224",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133109",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The behavior of $n$ interacting processes synchronized
by the ``Time Warp'' rollback mechanism is analyzed
under the constraint that the total amount of memory to
execute the program is limited. In Time Warp, a
protocol called ``cancelback'' has been proposed to
reclaim storage when the system runs out of memory. A
discrete state, continuous time Markov chain model for
Time Warp augmented with the cancelback protocol is
developed for a shared memory system with $n$
homogeneous processors and homogeneous workload. The
model allows one to predict speedup as the amount of
available memory is varied. To our knowledge, this is
the first model to achieve this result. The performance
predicted by the model is validated through direct
performance measurements on an operational Time Warp
system executing on a shared-memory multiprocessor
using a workload similar to that in the model. It is
observed that Time Warp with only a few additional
message buffers per processor over that required in the
corresponding sequential execution can achieve
approximately the same or even greater performance than
Time Warp with unlimited memory, if GVT computation and
fossil collection can be efficiently implemented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Turek:1992:SPT,
author = "John Turek and Joel L. Wolf and Krishna R. Pattipati
and Philip S. Yu",
title = "Scheduling parallelizable tasks: putting it all on the
shelf",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "225--236",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133111",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we formulate the following natural
multiprocessor scheduling problem: Consider a parallel
system with $P$ processors. Suppose that there are $N$
tasks to be scheduled on this system, and that the
execution time of each task $ j \in \{ 1, \ldots {}, N
\} $ is a nonincreasing function $ t_j(\beta_j)$
of the number of processors $ \beta_j \in \{ 1,
\ldots {}, P \} $ allotted to it. The goal is to find,
for each task $j$, an allotment of processors $
\beta_j$, and, overall, a schedule assigning the tasks
to the processors which minimizes the makespan, or
latest task completion time. The so-called shelf
strategy is commonly used for orthogonal rectangle
packing, a related and classic optimization problem.
The prime difference between the orthogonal rectangle
problem and our own is that in our case the rectangles
are, in some sense, malleable: The height of each
rectangle is a nonincreasing function of its width. In
this paper, we solve our multiprocessor scheduling
problem exactly in the context of a shelf-based
paradigm. The algorithm we give uses techniques from
resource allocation theory and employs a variety of
other combinatorial optimization techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bremaud:1992:SLR,
author = "P. Br{\'e}maud and W.-B. Gong",
title = "Stationary likelihood ratios and smoothed perturbation
analysis gradient estimates for the routing problem",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "237--238",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.114676",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present stationary and regenerative form estimates
for the gradients of the cycle variables with respect
to a thinning parameter in the arrival process of G/G/1
queueing systems. Our estimates belong to the category
of the likelihood ratio method (LRM) and smoothed
perturbation analysis (SPA) estimates. The results are
useful in adaptive routing design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Candlin:1992:SPP,
author = "Rosemary Candlin and Peter Fisk and Joe Phillips and
Neil Skilling",
title = "Studying the performance properties of concurrent
programs by simulation experiments on synthetic
programs",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "239--240",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133138",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have developed a methodology for constructing
performance models of different types of concurrent
programs, and hence obtaining estimates of execution
times on different multiprocessor machines. A given
class of program is characterized in terms of a small
set of parameters which summarise the behaviour of the
program over time. Synthetic programs with selected
sets of parameters can then be generated and their
execution simulated on a model of some given parallel
machine. By varying the parameters systematically, we
can discover which factors most affect performance. Our
approach has been to conduct factorial experiments from
which we can obtain quantitative predictions of
performance for arbitrary concurrent programs whose
parameter values lie within the extreme factor levels,
and whose synchronization behaviour conforms to one of
a number of common patterns.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berry:1992:CIP,
author = "Robert F. Berry and Joseph L. Hellerstein",
title = "Characterizing and interpreting periodic behavior in
computer systems",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "241--242",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133139",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rahm:1992:HPC,
author = "Erhard Rahm and Donald Ferguson",
title = "High performance cache management for sequential data
access",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "243--244",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133141",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chakka:1992:MSG,
author = "Ram Chakka and Isi Mitrani",
title = "Multiprocessor systems with general breakdowns and
repairs (extended abstract)",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "245--246",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133143",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brewer:1992:PHP,
author = "Eric A. Brewer and Chrysanthos N. Dellarocas and
Adrian Colbrook and William E. Weihl",
title = "{PROTEUS}: a high-performance parallel-architecture
simulator",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "247--248",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meliksetian:1992:PAC,
author = "Dikran S. Meliksetian and C. Y. Roger Chen",
title = "Performance analysis of communications in static
interconnection networks",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "249--250",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a model, based on a network of DX/D/1
queues, to predict the communication performance of
static interconnection networks under various
communication patterns. Our model predicts delay time
distributions in the links as well as the first and
second moments of the overall delay time of messages in
the system. These predictions are verified by the
results of simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dan:1992:CDA,
author = "Asit Dan and Philip S. Yu and Jen-Yao Chung",
title = "Characterization of database access skew in a
transaction processing environment",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "251--252",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The knowledge of access skew (non-uniform access) in
each database relation is useful for both workload
management (buffer pool allocation, transaction
routing, etc.), as well as capacity planning for
changing workload mix. However, it is a challenging
problem to characterize the access skew of a real
database workload in a simple manner that can easily be
used to compute the buffer hit probability under the
LRU replacement policy. A concise way to characterize
the access skew is proposed by assuming that the large
number of data pages may be logically grouped into a
small number of partitions such that the frequency of
accessing each page within a partition can be treated
as equal. Based on this approach, a recursive binary
partitioning algorithm is presented that can infer the
access skew from the buffer hit probabilities for a
subset of the buffer sizes. This avoids explicit
estimation of individual access frequencies for the
large number of database pages. The method is validated
of its ability to predict buffer hit from the skew
characterization using production database traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1992:XPE,
author = "Aloke Gupta and Wen-Mei W. Hwu",
title = "{Xprof}: profiling the execution of {X Window}
programs",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "253--254",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133152",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shoham:1992:ETP,
author = "Ruth Shoham and Uri Yechiali",
title = "Elevator-type polling systems (abstract)",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "255--257",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/149439.133154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baccelli:1992:PSS,
author = "Fran{\c{c}}ois Baccelli and Miguel Canales",
title = "Parallel simulation of stochastic {Petri} nets using
recurrence equations",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "257--258",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Petri nets provide a powerful modeling formalism,
which allows one to describe and study various classes
of systems, such as synchronous and asynchronous
processes, and/or parallel or sequential ones. We
present below a software package, currently under
development, that allows the user to specify a
stochastic marked graph [1] using either a graphical
interface or a specification language. From this
specification a simulation program for a Single
Instruction Multiple Data (SIMD) parallel machine is
generated. A Connection Machine 2 (CM2) is used as the
architecture for running this program.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jobmann:1992:PAP,
author = "Manfred R. Jobmann and Johann Schumann",
title = "Performance analysis of a parallel theorem prover",
journal = j-SIGMETRICS,
volume = "20",
number = "1",
pages = "259--260",
month = jun,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/133057.133158",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:13:01 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shanley:1992:TRN,
author = "Kim Shanley and Amie Belongia",
title = "{TPC} releases new benchmark: {TPC-C}",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "8--22",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141861",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pooley:1992:BRP,
author = "Rob Pooley",
title = "Book review: {``Performance Engineering of Software
Systems'' by Connie U. Smith (Addison Wesley 1990)}",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "23--24",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.1044851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To those working in the field of performance, Connie
Smith should need no introduction. She is the author of
many papers which have sought to make accessible the
techniques of performance analysis and prediction to
practising software designers. She is probably the
first to have used the term `performance engineering'
to describe the application of such techniques to
software systems. The publication of a book which
encapsulates her ideas is therefore of considerable
interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Taylor:1992:BRQ,
author = "Stephen Taylor",
title = "Book review: {``Queuing Networks --- Exact
Computational Algorithms: A Unified Theory Based on
Decomposition and Aggregation'' by Adrian E. Conway and
Nicolas D. Georganas (MIT Press 1989)}",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "24--26",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.1044852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queuing Network models are an approach to modeling
real-world problems based on the abstractions of
servers, queues, and routing between them. Product-form
queuing networks have a particularly simple formula
describing the state distribution, and have accrued a
literature describing them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kobayashi:1992:CMM,
author = "Makoto Kobayashi",
title = "A cache multitasking model",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "27--37",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141863",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A hierarchical program behavior model in a
multitasking environment was proposed and applied to a
cache multitasking model for performance evaluation.
The hierarchical program behavior model consists of the
task switching model, execution interval model, and the
line (block) reference behavior model for each
individual task. An execution interval is a continuous
execution of a task between task switches. As a task
executes in an execution interval, it brings its lines
into a cache according to the line reference behavior
model. The Stack Growth Function (SGF) model was used
for this purpose. The state of a cache is defined by
the numbers of lines of the individual tasks. The state
of a cache at task switches then constitutes an
embedded Markov chain. Although a set of simultaneous
linear equations in steady state cannot exactly be
solved practically because of its excessively large
state space, it can be solved very efficiently by a
Monte-Carlo simulation. The model was validated against
the miss rate measured by a hardware monitor in a
controlled environment on a mainframe running IBM MVS
operating system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Porotskiy:1992:DTM,
author = "S. M. Porotskiy and A. E. Fateev",
title = "Development trends in methods for efficiency
evaluation of {ES}-based computer systems",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "38--42",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141864",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper concerns some methods for efficiency
evaluation of IBM-compatible universal ES computers, as
being improved during their short life-time. The
multi-level structure of computer efficiency is
described, and the factors influencing its
quantification are pointed out. The measured results
are given on the capacity of individual computers with
different loads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Porotskiy:1992:SRP,
author = "S. M. Porotskiy and A. E. Fateev",
title = "System and real performance evaluation of computer",
journal = j-SIGMETRICS,
volume = "20",
number = "2",
pages = "43--46",
month = nov,
year = "1992",
CODEN = "????",
DOI = "https://doi.org/10.1145/141858.141865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to review some methods
for efficiency evaluation of universal computer
systems. This paper is a continuation of [1] and concerns
the measurements and analytical modeling for
performance evaluation on system and real levels.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vandeLiefvoort:1993:BRM,
author = "Appie van de Liefvoort",
title = "Book review: {``Multiple Access Protocols: Performance
and Analysis'' by Raphael Rom and Moshe Sidi
(Springer-Verlag, 1990)}",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "5--6",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.1044950",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiple Access Protocols, the focus of this book, are
the rules and procedures which dictate the behavior of
switches and channels in computer networks, they are
the channel allocation schemes that can be found in the
Medium Access Control layer in the OSI reference model.
Most of us have heard of FDMA, TDMA, CDMA, Ethernet,
CSMA, CSMA/CD, Aloha, token passing, packet switching,
or their many, many variations. According to their
preface, the authors aim this book at the student and
professional engineer who is (or will be) responsible
for the design and/or operation for such networks.
Rather than giving a vast compendium of protocols and
their analysis, they hope to give an understanding of
the behavior and operation of multiple access systems
through their performance analysis. They try to cover
all types of protocols for random access networks and
most of the analytical methods used in their
performance analysis with a uniform notation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{TPC:1993:STRa,
author = "{Corporate TPC}",
title = "Summary of {TPC} results (as of {December 22, 1992})",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "7--21",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.155769",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maffeis:1993:FAP,
author = "Silvano Maffeis",
title = "File access patterns in public {FTP} archives and an
index for locality of reference",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "22--35",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.155771",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Global filesystems and new file transfer protocols are
a great need and challenge in the presence of
drastically growing networks. In this paper we present
results obtained from an investigation of access to
public files which took place over three months. This
work visualizes first results on the popularity of
public ftp files, on common operations (deletions,
updates and insertions) to public file-archives and on
encountered filesizes. An index for measuring locality
of reference to a resource is also proposed. The
results show that most file transfers relate to only a
small fraction of the files in an archive and that a
considerable part of the operations to public files are
updates of files. Further results are presented and
interpreted in the paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file transfer; filesizes; locality of reference;
popularity of files; replication",
}
@Article{Ulusoy:1993:AAR,
author = "{\"O}zg{\"u}r Ulusoy",
title = "An approximate analysis of a real-time database
concurrency control protocol via {Markov} modeling",
journal = j-SIGMETRICS,
volume = "20",
number = "3",
pages = "36--48",
month = mar,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155768.155773",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Transactions processed in a real-time database system
(RTDBS) are associated with real-time constraints
typically in the form of deadlines. Computer-integrated
manufacturing, the stock market, banking, and command
and control systems are several examples of RTDBS
applications where the timeliness of transaction
response is as important as the consistency of data.
Design of a RTDBS requires the integration of concepts
from both real-time systems and database systems to
handle the timing and consistency requirements
together; i.e., to execute transactions so as to both
meet the deadlines and maintain the database
consistency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{IBM:1993:SP,
author = "{Corporate IBM Systems Analysis Department}",
title = "Selected publications: 1992",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "3--9",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155777",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{TPC:1993:STRb,
author = "{Corporate TPC}",
title = "Summary of {TPC} results (as of {March} 15, 1993)",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "10--23",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raatikainen:1993:CAW,
author = "Kimmo E. E. Raatikainen",
title = "Cluster analysis and workload classification",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "24--30",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155781",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Clustering techniques are widely recommended tools for
workload classification. The k-means algorithm is
widely accepted as the `standard' technique of
detecting workload classes automatically from
measurement data. This paper examines validity of the
obtained workload classes, when the current system and
workload is analyzed by a queueing network model and
mean value analysis. Our results, based on one week's
accounting data of a VAX 8600, indicate that the
results of queueing network analysis are not stable
when the classes of workload are constructed through
the {\em k-means\/} algorithm. Therefore, we cannot
recommend that the most widely used clustering
technique should be used in any workload
characterization study without careful validation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1993:EPP,
author = "Robert B. Smith and James K. Archibald and Brent E.
Nelson",
title = "Evaluating performance of prefetching second level
caches",
journal = j-SIGMETRICS,
volume = "20",
number = "4",
pages = "31--42",
month = may,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/155775.155782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the increasing disparity between processor and
main memory system cycle times, many computer systems
are now incorporating two levels of cache memory.
Several studies have been done on the design and
performance of second level caches, including [3] and
[20]. It certainly can and has been shown that the
addition of a second level of cache enhances the
performance of many systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1993:NAP,
author = "Peter M. Chen and David A. Patterson",
title = "A new approach to {I/O} performance evaluation:
self-scaling {I/O} benchmarks, predicted {I/O}
performance",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "1--12",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current I/O benchmarks suffer from several chronic
problems: they quickly become obsolete, they do not
stress the I/O system, and they do not help in
understanding I/O system performance. We propose a new
approach to I/O performance analysis. First, we propose
a self-scaling benchmark that dynamically adjusts
aspects of its workload according to the performance
characteristic of the system being measured. By doing
so, the benchmark automatically scales across current
and future systems. The evaluation aids in
understanding system performance by reporting how
performance varies according to each of five workload
parameters. Second, we propose predicted performance, a
technique for using the results from the self-scaling
evaluation to quickly estimate the performance for
workloads that have not been measured. We show that
this technique yields reasonably accurate performance
estimates and argue that this method gives a far more
accurate comparative performance evaluation than
traditional single point benchmarks. We apply our new
evaluation technique by measuring a SPARCstation 1+
with one SCSI disk, an HP 730 with one SCSI-II disk, a
Sprite LFS DECstation 5000/200 with a three-disk disk
array, a Convex C240 minisupercomputer with a four-disk
disk array, and a Solbourne 5E/905 fileserver with a
two-disk disk array.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Biswas:1993:TDA,
author = "Prabuddha Biswas and K. K. Ramakrishnan and Don
Towsley",
title = "Trace driven analysis of write caching policies for
disks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "13--23",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166971",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The I/O subsystem in a computer system is becoming the
bottleneck as a result of recent dramatic improvements
in processor speeds. Disk caches have been effective in
closing this gap but the benefit is restricted to the
read operations as the write I/Os are usually committed
to disk to maintain consistency and to allow for crash
recovery. As a result, write I/O traffic is becoming
dominant and solutions to alleviate this problem are
becoming increasingly important. A simple solution
which can easily work with existing file systems is to
use non-volatile disk caches together with a
write-behind strategy. In this study, we look at the
issues around managing such a cache using a detailed
trace driven simulation. Traces from three different
commercial sites are used in the analysis of various
policies for managing the write cache. We observe that
even a simple write-behind policy for the write cache
is effective in reducing the total number of writes by
over 50\%. We further observe that the use of
hysteresis in the policy to purge the write cache, with
two thresholds, yields substantial improvement over a
single threshold scheme. The inclusion of a mechanism
to piggyback blocks from the write cache with read miss
I/Os further reduces the number of writes to only about
15\% of the original total number of write operations.
We compare two piggybacking options and also study the
impact of varying the write cache size. We briefly
looked at the case of a single non-volatile disk cache
to estimate the performance impact of statically
partitioning the cache for reads and writes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sugumar:1993:ESC,
author = "Rabin A. Sugumar and Santosh G. Abraham",
title = "Efficient simulation of caches under optimal
replacement with applications to miss
characterization",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "24--35",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache miss characterization models such as the three
Cs model are useful in developing schemes to reduce
cache misses and their penalty. In this paper we
propose the OPT model that uses cache simulation under
optimal (OPT) replacement to obtain a finer and more
accurate characterization of misses than the three Cs
model. However, current methods for optimal cache
simulation are slow and difficult to use. We present
three new techniques for optimal cache simulation.
First, we propose a limited lookahead strategy with
error fixing, which allows one pass simulation of
multiple optimal caches. Second, we propose a scheme to
group entries in the OPT stack, which allows efficient
tree based fully-associative cache simulation under
OPT. Third, we propose a scheme for exploiting partial
inclusion in set-associative cache simulation under
OPT. Simulators based on these algorithms were used to
obtain cache miss characterizations using the OPT model
for nine SPEC benchmarks. The results indicate that
miss ratios under OPT are substantially lower than
those under LRU replacement, by up to 70\% in
fully-associative caches, and up to 32\% in two-way
set-associative caches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chame:1993:CIP,
author = "Jacqueline Chame and Michel Dubois",
title = "Cache inclusion and processor sampling in
multiprocessor simulations",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "36--47",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166977",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The evaluation of cache-based systems demands careful
simulations of entire benchmarks. Simulation efficiency
is essential to realistic evaluations. For systems with
large caches and large number of processors, simulation
is often too slow to be practical. In particular, the
optimized design of a cache for a multiprocessor is
very complex with current techniques. This paper
addresses these problems. First we introduce necessary
and sufficient conditions for cache inclusion in
systems with invalidations. Second, under cache
inclusion, we show that an accurate trace for a given
processor or for a cluster of processors can be
extracted from a multiprocessor trace. With this
methodology, possible cache architectures for a
processor or for a cluster of processors are evaluated
independently of the rest of the system, resulting in a
drastic reduction of the trace length and simulation
complexity. Moreover, many important system-wide
metrics can be estimated with good accuracy by
extracting the traces of a set of randomly selected
processors, an approach we call processor sampling. We
demonstrate the accuracy and efficiency of these
techniques by applying them to three 64-processor
traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reinhardt:1993:WWT,
author = "Steven K. Reinhardt and Mark D. Hill and James R.
Larus and Alvin R. Lebeck and James C. Lewis and David
A. Wood",
title = "The {Wisconsin Wind Tunnel}: virtual prototyping of
parallel computers",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "48--60",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166979",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We have developed a new technique for evaluating cache
coherent, shared-memory computers. The Wisconsin Wind
Tunnel (WWT) runs a parallel shared-memory program on a
parallel computer (CM-5) and uses execution-driven,
distributed, discrete-event simulation to accurately
calculate program execution time. WWT is a virtual
prototype that exploits similarities between the system
under design (the target) and an existing evaluation
platform (the host). The host directly executes all
target program instructions and memory references that
hit in the target cache. WWT's shared memory uses the
CM-5 memory's error-correcting code (ECC) as valid bits
for a fine-grained extension of shared virtual memory.
Only memory references that miss in the target cache
trap to WWT, which simulates a cache-coherence
protocol. WWT correctly interleaves target machine
events and calculates target program execution time.
WWT runs on parallel computers with greater speed and
memory capacity than uniprocessors. WWT's simulation
time decreases as target system size increases for
fixed-size problems and holds roughly constant as the
target system and problem scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adve:1993:IRD,
author = "Vikram S. Adve and Mary K. Vernon",
title = "The influence of random delays on parallel execution
times",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "61--73",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166982",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic models are widely used for the performance
evaluation of parallel programs and systems. The
stochastic assumptions in such models are intended to
represent non-deterministic processing requirements as
well as random delays due to inter-process
communication and resource contention. In this paper,
we provide compelling analytical and experimental
evidence that in current and foreseeable shared-memory
programs, communication delays introduce negligible
variance into the execution time between
synchronization points. Furthermore, we show using
direct measurements of variance that other sources of
randomness, particularly non-deterministic
computational requirements, also do not introduce
significant variance in many programs. We then use two
examples to demonstrate the implications of these
results for parallel program performance prediction
models, as well as for general stochastic models of
parallel systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rosti:1993:KEM,
author = "E. Rosti and E. Smirni and T. D. Wagner and A. W. Apon
and L. W. Dowdy",
title = "The {KSR1}: experimentation and modeling of
poststore",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "74--85",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166985",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Kendall Square Research introduced the KSR1 system in
1991. The architecture is based on a ring of rings of
64-bit microprocessors. It is a distributed, shared
memory system and is scalable. The memory structure is
unique and is the key to understanding the system.
Different levels of caching eliminate physical memory
addressing and leads to the ALLCACHE\TM{} scheme. Since
requested data may be found in any of several caches,
the initial access time is variable. Once pulled into
the local (sub) cache, subsequent access times are
fixed and minimal. Thus, the KSR1 is a Cache-Only
Memory Architecture (COMA) system. This paper describes
experimentation and an analytic model of the KSR1. The
focus is on the poststore programmer option. With the
poststore option, the programmer can elect to broadcast
the updated value of a variable to all processors that
might have a copy. This may save time for threads on
other processors, but delays the broadcasting thread
and places additional traffic on the ring. The specific
issue addressed is to determine under what conditions
poststore is beneficial. The analytic model and the
experimental observations are in good agreement. They
indicate that the decision to use poststore depends
both on the application and the current system load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ganger:1993:PFM,
author = "Gregory R. Ganger and Yale N. Patt",
title = "The process-flow model: examining {I/O} performance
from the system's point of view",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "86--97",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166989",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Input/output subsystem performance is currently
receiving considerable research attention. Significant
effort has been focused on reducing average I/O
response times and increasing throughput for a given
workload. This work has resulted in tremendous advances
in I/O subsystem performance. It is unclear, however,
how these improvements will be reflected in overall
system performance. The central problem lies in the
fact that the current method of study tends to treat
all I/O requests as equally important. We introduce a
three class taxonomy of I/O requests based on their
effects on system performance. We denote the three
classes {\em time-critical, time-limited, and
time-noncritical}. A system-level, trace-driven
simulation model has been developed for the purpose of
studying disk scheduling algorithms. By incorporating
knowledge of I/O classes, algorithms tuned for system
performance rather than I/O subsystem performance may
be developed. Traditional I/O subsystem simulators
would rate such algorithms unfavorably because they
produce suboptimal subsystem performance. By studying
the I/O subsystem via global, system-level simulation,
one can more easily identify changes that will improve
overall system performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1993:APM,
author = "Edward K. Lee and Randy H. Katz",
title = "An analytic performance model of disk arrays",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "98--109",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166994",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As disk arrays become widely used, tools for
understanding and analyzing their performance become
increasingly important. In particular, performance
models can be invaluable in both configuring and
designing disk arrays. Accurate analytic performance
models are preferable to other types of models because
they can be quickly evaluated, are applicable under a
wide range of system and workload parameters, and can
be manipulated by a range of mathematical techniques.
Unfortunately, analytic performance models of disk
arrays are difficult to formulate due to the presence
of {\em queueing\/} and {\em fork-join
synchronization\/}; a disk array request is broken up
into independent disk requests which must all complete
to satisfy the original request. In this paper, we
develop and validate an analytic performance model for
disk arrays. We derive simple equations for
approximating their utilization, response time and
throughput. We validate the analytic model via
simulation, investigate the error introduced by each
approximation used in deriving the analytic model, and
examine the validity of some of the conclusions that
can be drawn from the model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tang:1993:MMB,
author = "Dong Tang and Ravishankar K. Iyer",
title = "{MEASURE+}: a measurement-based dependability analysis
package",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "110--121",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166996",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most existing dependability modeling and evaluation
tools are designed for building and solving commonly
used models with emphasis on solution techniques, not
for identifying realistic models from measurements. In
this paper, a measurement-based dependability analysis
package, MEASURE+, is introduced. Given measured data
from real systems in a specified format MEASURE+ can
generate appropriate dependability models and measures
including Markov and semi-Markov models, $k$-out-of-$n$
availability models, failure distribution and hazard
functions, and correlation parameters. These models and
measures obtained from data are valuable for
understanding actual error/failure characteristics,
identifying system bottlenecks, evaluating
dependability for real systems, and verifying
assumptions made in analytical models. The paper
illustrates MEASURE+ by applying it to the data from a
VAXcluster multicomputer system. Models of field
failure behavior identified by MEASURE+ indicate that
both traditional models assuming failure independence
and those few taking correlation into account are not
representative of the actual occurrence process of
correlated failures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramesh:1993:STS,
author = "A. V. Ramesh and Kishor Trivedi",
title = "On the sensitivity of transient solutions of {Markov}
models",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "122--134",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.166998",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the sensitivity of transient solutions of
Markov models to perturbations in their generator
matrices. The perturbations can either be of a certain
structure or can be very general. We consider two
different measures of sensitivity and derive upper
bounds on them. The derived bounds are sharper than
previously reported bounds in the literature. Since the
sensitivity analysis of transient solutions is
intimately related to the condition of the exponential
of the CTMC matrix, we derive an expression for the
condition number of the CTMC matrix exponential which
leads to some interesting implications. We compare the
derived sensitivity bounds both numerically and
analytically with those reported in the literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:1993:PSM,
author = "David M. Nicol and Philip Heidelberger",
title = "Parallel simulation of {Markovian} queueing networks
using adaptive uniformization",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "135--145",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167000",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a method for simulating a large
class of queueing network models with Markovian
phase-type distributions on parallel architectures. The
method, which is based on uniformization, exploits
Markovian properties that permit one to first build
schedules of simulation times at which processors ought
to synchronize, and then simulate a mathematically
correct sample path through the pre-chosen schedule.
While the technique eliminates many of the overheads
incurred by other synchronization methods, it may
suffer when the maximum rate (in simulation time) at
which one processor might possibly ever send jobs to
another is much larger than the average rate at which
it actually does. We show how to reduce these
overheads, sometimes doubling the execution rate as a
result. We discuss experiments performed on the Intel
iPSC/2 and Touchstone Delta architectures, where
speedups in excess of 155 are observed on 256
processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goldschmidt:1993:ATD,
author = "Stephen R. Goldschmidt and John L. Hennessy",
title = "The accuracy of trace-driven simulations of
multiprocessors",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "146--157",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167001",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In trace-driven simulation, traces generated for one
set of system characteristics are used to simulate a
system with different characteristics. However, the
execution path of a multiprocessor workload may depend
on the order of events occurring on different
processing elements. The event order, in turn, depends
on system characteristics such as memory-system
latencies and buffer-sizes. Trace-driven simulations of
multiprocessor workloads are inaccurate unless the
dependencies are eliminated from the traces. We have
measured the effects of these inaccuracies by comparing
trace-driven simulations to direct simulations of the
same workloads. The simulators predicted identical
performance only for workloads whose traces were
timing-independent. Workloads that used first-come
first-served scheduling and/or non-deterministic
algorithms produced timing-dependent traces, and
simulation of these traces produced inaccurate
performance predictions. Two types of performance
metrics were particularly affected: those related to
synchronization latency and those derived from
relatively small numbers of events. To accurately
predict such performance metrics, timing-independent
traces or direct simulation should be used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Setia:1993:PSM,
author = "Sanjeev K. Setia and Mark S. Squillante and Satish K.
Tripathi",
title = "Processor scheduling on multiprogrammed, distributed
memory parallel computers",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "158--170",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167002",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multicomputers, consisting of many processing nodes
connected through a high speed interconnection network,
have become an important and common platform for a
large body of scientific computations. These parallel
systems have traditionally executed programs in batch
mode, or have at most space-shared the processors among
multiple programs using a static partitioning policy.
This, however, can result in relatively low system
utilization and throughput for important classes of
scientific applications. In this paper we consider a
class of scheduling policies that attempt to increase
processor utilization and system throughput by
timesharing a partition of processors among multiple
programs. We compare the system performance under this
multiprogramming policy with that of static
partitioning for a variety of workloads via both
analytic and simulation modeling. Our results show that
timesharing a partition can provide significant
improvements in performance, particularly at moderate
to heavy loads. The performance gains of the
multiprogrammed policy depend upon the inherent
efficiency of the parallel programs that comprise the
workload, decreasing with increasing program
efficiency. Our analysis also provides the regions over
which one scheduling policy outperforms the other, as a
function of system load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:1993:PCT,
author = "Kun-Lung Wu and Philip S. Yu and James Z. Teng",
title = "Performance comparison of thrashing control policies
for concurrent {Mergesorts} with parallel prefetching",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "171--182",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167003",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the performance of various run-time thrashing
control policies for the merge phase of concurrent
mergesorts using parallel prefetching, where initial
sorted runs are stored on multiple disks and the final
sorted run is written back to another dedicated disk.
Parallel prefetching via multiple disks can be
attractive in reducing the response times for
concurrent mergesorts. However, severe {\em
thrashing\/} may develop due to imbalances between
input and output rates, thus a large number of
prefetched pages in the buffer can be replaced before
referenced. We evaluate through detailed simulations
three run-time thrashing control policies: (a)
disabling prefetching, (b) forcing synchronous writes
and (c) lowering the prefetch quantity in addition to
forcing synchronous writes. The results show that (1)
thrashing resulting from parallel prefetching can
severely degrade the system response time; (2) though
effective in reducing the degree of thrashing,
disabling prefetching may worsen the response time
since more synchronous reads are needed; (3) forcing
synchronous writes can both reduce thrashing and
improve the response time; (4) lowering the prefetch
quantity in addition to forcing synchronous writes is
most effective in reducing thrashing and improving the
response time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meliksetian:1993:MMB,
author = "Dikran S. Meliksetian and C. Y. Roger Chen",
title = "A {Markov}-modulated {Bernoulli} process approximation
for the analysis of {Banyan} networks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "183--194",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167005",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Markov-Modulated Bernoulli Process (MMBP) model is
used to analyze the delay experienced by messages in
clocked, packet-switched Banyan networks with $ k
\times k $ output-buffered switches. This approach
allows us to analyze both single packet messages and
multipacket messages with general traffic pattern
including uniform traffic, hot-spot traffic, locality
of reference, etc. The ability to analyze multipacket
messages is very important for multimedia applications.
Previous work, which is only applicable to restricted
message and traffic patterns, resorts to either
heuristic correction factors to artificially tune the
model or tedious computational efforts. In contrast,
the proposed model, which is applicable to much more
general message and traffic patterns, not only is an
application of a theoretically complete model but also
requires a minimal amount of computational effort. In
all cases, the analytical results are compared with
results obtained by simulation and are shown to be very
accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arakawa:1993:MVR,
author = "Hiroshi Arakawa and Daniel I. Katcher and Jay K.
Strosnider and Hideyuki Tokuda",
title = "Modeling and validation of the real-time {Mach}
scheduler",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "195--206",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167008",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Real-time scheduling theory is designed to provide
{\em a priori\/} verification that all real-time tasks
meet their timing requirements. However, this body of
theory generally assumes that resources are
instantaneously pre-emptable and ignores the costs of
systems services. In previous work [1, 2] we provided a
theoretical foundation for including the costs of the
operating system scheduler in the real-time scheduling
framework. In this paper, we apply that theory to the
Real-Time (RT) Mach scheduler. We describe a
methodology for measuring the components of the RT Mach
scheduler in user space. We analyze the predicted
performance of different real-time task sets on the
target system using the scheduling model and the
measured characteristics. We then verify the model
experimentally by measuring the performance of the
real-time task sets, consisting of RT Mach threads, on
the target system. The experimental measurements verify
the analytical model to within a small percentage of
error. Thus, using the model we have successfully
predicted the performance of real-time task sets using
system services, and developed consistent methodologies
to accomplish that prediction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baruah:1993:RHS,
author = "Sanjoy Baruah and Jayant R. Haritsa",
title = "{ROBUST}: a hardware solution to real-time overload",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "207--216",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167010",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "No on-line scheduling algorithm operating in a
uniprocessor environment can guarantee to obtain an
effective processor utilization greater than 25\% under
conditions of overload. This result holds in the most
general case, where incoming tasks may have arbitrary
slack times. We address here the issue of improving
overload performance in environments where the
slack-time characteristics of all incoming tasks
satisfy certain constraints. In particular, we present
a new scheduling algorithm, ROBUST, that efficiently
takes advantage of these task slack constraints to
provide improved overload performance and is
asymptotically optimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dey:1993:ELP,
author = "Jayanta K. Dey and James F. Kurose and Don Towsley and
C. M. Krishna and Mahesh Girkar",
title = "Efficient on-line processor scheduling for a class of
{IRIS} ({Increasing Reward with Increasing Service})
real-time tasks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "217--228",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167013",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider the problem of on-line
scheduling of real-time tasks which receive a `reward'
that depends on the amount of service received. In our
model, tasks have associated deadlines at which they
must depart the system. The task computations are such
that the longer they are able to execute before their
deadline, the greater the value of their computations,
i.e., the tasks have the property that they receive
{\em increasing reward with increasing service (IRIS)}.
We focus on the problem of scheduling IRIS tasks in a
system in which tasks arrive randomly over time, with
the goal of maximizing the average reward accrued per
task and per unit time. We describe and evaluate a
two-level policy for this system. A top-level algorithm
executes each time a task arrives and determines the
amount of service to allocate to each task in the
absence of future arrivals. A lower-level algorithm, an
earliest deadline first (EDF) policy in our case, is
responsible for the actual selection of tasks to
execute. This two-level policy is evaluated through a
combination of analysis and simulation. We observe that
it provides nearly optimal performance when the
variance in the interarrival times and/or laxities is
low and that the performance is more sensitive to
changes in the arrival process than the deadline
distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Morris:1993:ASS,
author = "Robert J. T. Morris",
title = "Analysis of superposition of streams into a cache
buffer",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "229--235",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167016",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the superposition of address streams into
a cache buffer which is managed according to a Least
Recently Used (LRU) replacement policy. Each of the
streams is characterized by a stack depth distribution,
i.e., the cache hit ratio as a function of the cache
size, if that individual stream were applied to a LRU
cache. We seek the cache hit ratio for each stream,
when the combined stream is applied to a shared LRU
cache. This problem arises in a number of branches of
computer science, particularly in database systems and
processor architecture. We provide two techniques to
solve this problem and demonstrate their effectiveness
using database I/O request streams. The first technique
is extremely simple and relies on an assumption that
the buffer is `well-mixed'. The second technique
relaxes this assumption and provides more accurate
results. We evaluate the performance of the two
techniques on realistic data, both in a lab environment
and a large database installation. We find that the
first simple technique provides accuracy which is
sufficient for most practical purposes. By
investigating sources of error and trying various
improvements in the model we obtain some insight into
the nature of database I/O request streams.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsai:1993:AMC,
author = "Jory Tsai and Anant Agarwal",
title = "Analyzing multiprocessor cache behavior through data
reference modeling",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "236--247",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops a {\em data reference modeling\/}
technique to estimate with high accuracy the cache miss
ratio in cache-coherent multiprocessors. The technique
involves analyzing the dynamic data referencing
behavior of parallel algorithms. Data reference
modeling first identifies different types of shared
data blocks accessed during the execution of a parallel
algorithm, then captures in a few parameters the cache
behavior of each shared block as a function of the
problem size, number of processors, and cache line
size, and finally constructs an analytical expression
for each algorithm to estimate the cache miss ratio.
Because the number of processors, problem size, and
cache line size are included as parameters, the
expression for the cache miss ratio can be used to
predict the performance of systems with different
configurations. Six parallel algorithms are studied,
and the analytical results compared against previously
published simulation results, to establish the
confidence level of the data reference modeling
technique. It is found that the average prediction
error for four out of six algorithms is within five
percent and within ten percent for the other two. The
paper also derives from the model several results on
how cache miss rates scale with system size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martonosi:1993:ETS,
author = "Margaret Martonosi and Anoop Gupta and Thomas
Anderson",
title = "Effectiveness of trace sampling for performance
debugging tools",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "248--259",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently there has been a surge of interest in
developing performance debugging tools to help
programmers tune their applications for better memory
performance [2, 4, 10]. These tools vary both in the
detail of feedback provided to the user, and in the
run-time overhead of using them. MemSpy [10] is a
simulation-based tool which gives programmers detailed
statistics on the memory system behavior of
applications. It provides information on the frequency
and causes of cache misses, and presents it in terms of
source-level data and code objects with which the
programmer is familiar. However, using MemSpy increases
a program's execution time by roughly 10 to 40 fold.
This overhead is generally acceptable for applications
with execution times of several minutes or less, but it
can be inconvenient when tuning applications with very
long execution times. This paper examines the use of
trace sampling techniques to reduce the execution time
overhead of tools like MemSpy. When simulating one
tenth of the references, we find that MemSpy's
execution time overhead is improved by a factor of 4 to
6. That is, the execution time when using MemSpy is
generally within a factor of 3 to 8 times the normal
execution time. With this improved performance, we
observe only small errors in the performance statistics
reported by MemSpy. On moderate sized caches of 16KB to
128KB, simulating as few as one tenth of the references
(in samples of 0.5M references each) allows us to
estimate the program's actual cache miss rate with an
absolute error no greater than 0.3\% on our five
benchmarks. These errors are quite tolerable within the
context of performance debugging. With larger caches we
can also obtain good accuracy by using longer sample
lengths. We conclude that, used with care, trace
sampling is a powerful technique that makes possible
performance debugging tools which provide {\em both\/}
detailed memory statistics {\em and\/} low execution
time overheads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ahn:1993:HTS,
author = "Jong-Suk Ahn and Peter B. Danzig and Deborah Estrin
and Brenda Timmerman",
title = "Hybrid technique for simulating high bandwidth delay
computer networks",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "260--261",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167026",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Researchers evaluate and contrast new network routing,
admission control, congestion control and flow control
algorithms through simulation. Analytically derived
arguments justifiably lack credibility because, in the
attempt to model the underlying physical system, the
analyst is forced to make compromising approximations.
However, unlike analytical techniques like Jackson
Queueing Networks, simulations require significant
computation and a simulation's state can consume a
great deal of memory. This paper describes a technique
that we developed to reduce the memory consumption of
communication network simulators. Reduced memory makes
simulations of larger and higher bandwidth-delay
networks possible, but introduces an adjustable degree
of approximation in the simulation. The higher the
memory savings, the less accurate the computed
measures. We call our technique {\em Flowsim}. The
paper motivates the need to simulate computer networks
rather than model them analytically, motivates why a
simulator's state can grow quickly, and explains why
analytical techniques have failed to model modern
communication networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Becker:1993:AIC,
author = "Jeffrey C. Becker and Arvin Park",
title = "An analysis of the information content of address and
data reference streams",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "262--263",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent increases in VLSI processor speed and
transistor density have not been matched by a
proportionate increase in the number of I/O pins used
to communicate information on and off chip. Since the
number of I/O pins is limited by packaging technology
and switching constraints, this trend is likely to
continue, and I/O bandwidth will become the primary
VLSI system performance bottleneck. This paper analyzes
the potential of address and data stream coding in
order to reduce bandwidth requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghandeharizadeh:1993:EAV,
author = "Shahram Ghandeharizadeh and Luis Ramos",
title = "An evaluation of alternative virtual replication
strategies for continuous retrieval of multimedia
data",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "264--265",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During the past decade, information technology has
evolved to store and retrieve multimedia data (e.g.,
audio, video). Multimedia information systems utilize a
variety of human senses to provide an effective means
of conveying information. Already, these systems play a
major role in educational applications, entertainment
technology, and library information systems. A
challenging task when implementing these systems is to
support a continuous retrieval of an object at the
bandwidth required by its media type. This is
challenging because certain media types, in particular
video, require very high bandwidths. For example, the
bandwidth required by NTSC (the US standard established
by the National Television System Committee) for
`network-quality' video is about 45 megabits per second
(Mbps). Recommendation 601 of the International Radio
Consultative Committee (CCIR) calls for a 216 Mbps
bandwidth for video objects. A video object based on
the HDTV (High Definition Television) quality images
requires approximately a 700 Mbps bandwidth. Compare
these bandwidth requirements with the typical 10 Mbps
bandwidth of a magnetic disk drive, which is not
expected to increase significantly in the near future.
Currently, there are several ways to support continuous
display of these objects: (1) sacrifice the quality of
the data by using either a lossy compression technique
or a low resolution device, (2) employ the aggregate
bandwidth of several disk drives by declustering an
object across multiple disks [2], and (3) use a
combination of these two techniques. Lossy compression
techniques encode data into a form that consumes a
relatively small amount of space, however, when the
data is decoded, it yields a representation similar to
the original (some loss of data). While it is
effective, there are applications that cannot tolerate
loss of data. As an example consider the video signals
collected from space. This data may not be compressed
using a lossy compression technique. Otherwise, the
scientists who later uncompress and analyze the data
run the risk of either observing phenomena that may not
exist due to a slight change in data or miss important
observations due to some loss of data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kay:1993:STI,
author = "Jonathan Kay and Joseph Pasquale",
title = "A summary of {TCP\slash IP} networking software
performance for the {DECstation 5000}",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "266--267",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network software speed is not increasing as rapidly as
that of work-station CPUs. The goal of this study is to
determine how various components of network software
contribute to this bottleneck. In this extended
abstract, we summarize the performance of TCP/IP and
UDP/IP networking software for the DECstation 5000/200
workstations connected by an FDDI LAN.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lewandowski:1993:AAP,
author = "Gary Lewandowski and Anne Condon and Eric Bach",
title = "Asynchronous analysis of parallel dynamic
programming",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "268--269",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We examine a very simple asynchronous model of
parallel computation that assumes the time to compute a
task is random, following some probability
distribution. The goal of this model is to capture the
effects of unexpected delays on processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shin:1993:ELS,
author = "Kang G. Shin and Chao-Ju Hou",
title = "Evaluation of load sharing in {HARTS} while
considering message routing and broadcasting",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "270--271",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we apply the load sharing (LS)
mechanism proposed in [1, 2] to HARTS, an experimental
distributed realtime system [3] currently being built
at the Real-Time Computing Laboratory of the University
of Michigan.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Torrellas:1993:BCA,
author = "Josep Torrellas and Andrew Tucker and Anoop Gupta",
title = "Benefits of cache-affinity scheduling in shared-memory
multiprocessors: a summary",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "272--274",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An interesting and common class of workloads for
shared-memory multiprocessors is multiprogrammed
workloads. Because these workloads generally contain
more processes than there are processors in the
machine, there are two factors that increase the number
of cache misses. First, several processes are forced to
time-share the same cache, resulting in one process
displacing the cache state previously built up by a
second one. Consequently, when the second process runs
again, it generates a stream of misses as it rebuilds
its cache state. Second, since an idle processor simply
selects the highest priority runnable process, a given
process often moves from one CPU to another. This
frequent migration results in the process having to
continuously reload its state into new caches,
producing streams of cache misses. To reduce the number
of misses in these workloads, processes should reuse
their cached state more. One way to encourage this is
to schedule each process based on its affinity to
individual caches, that is, based on the amount of
state that the process has accumulated in an individual
cache. This technique is called {\em cache affinity
scheduling}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vetland:1993:CMA,
author = "Vidar Vetland and Peter Hughes and Arne S{\o}lvberg",
title = "A composite modelling approach to software performance
measurement",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "275--276",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditionally performance modellers have tended to
ignore the difficulty of obtaining parameter values
which represent the resource demands of multi-layered
software. In practice the use of performance
engineering in large-scale systems development is
limited by the cost of acquiring appropriate
performance information about the various software
components. However, if this information can be reused
when components are combined in different ways, then
the cost of measurement can be more easily justified.
Such reuse can be achieved by means of a composite work
model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wagner:1993:AMV,
author = "David B. Wagner",
title = "Approximate mean value analysis of interconnection
networks with deflection routing",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "277--278",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an Approximate Mean Value Analysis
model of deflection routing in Shuffle-Loop
interconnection networks. (The methodology is readily
extended to other network topologies.) In contrast to
most previous work on deflection routing, the model
makes no assumptions about traffic patterns, nor does
it assume that messages that cannot be admitted to the
network are lost. The technique allows the network to
be modeled in its entirety: all processors, switches,
and memory modules, and their steady-state
interactions, are modeled explicitly. The results of
the model are found to be in close agreement with the
results of simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:1993:OFT,
author = "Carey L. Williamson",
title = "Optimizing file transfer response time using the
loss-load curve congestion control mechanism",
journal = j-SIGMETRICS,
volume = "21",
number = "1",
pages = "279--280",
month = jun,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/166962.167043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:14:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Loss-load curves are a recently proposed feedback
mechanism for rate-based congestion control in datagram
computer networks. In the loss-load model, packet loss
inside the network is a direct function of sender
transmission rates, and senders choose their own
transmission rate based on the loss-load tradeoff curve
provided by the network. Earlier work [1] has provided
the mathematical basis for the loss-load model and
provided preliminary simulation results demonstrating
its responsiveness, fairness, and stability. The
loss-load model works well for simple network
environments where each source has a large number of
packets to transmit, and wishes to maximize raw
throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lipsky:1993:BRI,
author = "Lester Lipsky",
title = "Book review: {``Introduction to Computer System
Performance Evaluation'' by Krishna Kant (McGraw-Hill,
1992)}",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "7--9",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.1044951",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this book, in the author's own words is
`\ldots{} two-fold. First, it should be usable as a
text for a one or two semester graduate course in the
theory and practice of performance evaluation with
strong emphasis on analytic modeling. Second, it should
be useful as a reference to both researchers and
practitioners in the performance evaluation field'. The
recommended prerequisite courses are `probability
theory, operating systems, and computer architecture.'
If one throws in a course in linear algebra or matrix
theory (how can Markov chains be studied without it?)
then one has the typical undergraduate major (or a good
minor) degree in Computer Science/Engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kinicki:1993:BRT,
author = "Robert E. Kinicki",
title = "Book review: {``Telecommunications and Networking'' by
Udo W. Pooch, Denis Machuel and John McCahn (CRC Press,
1991)}",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "9--10",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.1044952",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is intended as an introduction to
telecommunications. In the preface the authors mention
that one of their goals is to present an overview of
the interaction and relationship between
telecommunications and data processing. Thus the text
is divided into three parts --- basics of
telecommunications, transmission systems, and
networking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:1993:SCM,
author = "Xiren Cao",
title = "Some common misconceptions about performance modeling
and validation",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "11--15",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.174217",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing networks and Markov processes etc. are widely
used in modeling computer systems and communication
networks to study their performance and reliability. To
solve a real world problem, the model developed has to
be validated through measured data. In this paper, we
point out that in validating a model, one has to be
very clear about one's claims regarding what has been
validated; too `accurate' results do not imply a
correct model and usually indicate a validation
problem. We discuss some common misconceptions in
performance modeling and validation. We illustrate our
points through examples. To capture the main concepts,
the problems are simplified in these examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maffeis:1993:CMA,
author = "Silvano Maffeis",
title = "Cache management algorithms for flexible filesystems",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "16--25",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.174219",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache management in flexible filesystems deals with
the problem of determining a cached file to be replaced
when the local cache space is exhausted. In analogy to
virtual memory management, several different algorithms
exist for managing cached files. In this paper we
simulate the behavior of {\em First-In-First-Out
(FIFO), Least Recently Used (LRU), Least Frequently
Used (LFU)\/} and a variation of LFU we call the {\em
File Length Algorithm (LEN)\/} from the viewpoint of
file access times, cache hit ratios and availability.
The results of several simulation runs are presented
and interpreted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{UI:1993:PMA,
author = "{UNIX International}",
title = "Performance management activities within {UNIX
International}",
journal = j-SIGMETRICS,
volume = "21",
number = "2",
pages = "42--42",
month = dec,
year = "1993",
CODEN = "????",
DOI = "https://doi.org/10.1145/174215.174221",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The primary output of the UNIX International Work
Group on Performance Measurement is a set of
requirements and recommendations to UNIX International
and UNIX System Laboratories for the development of
standard performance measurement interfaces to the UNIX
System. Requirements will be based on the collective,
non-vendor specific needs for a standard performance
architecture. Currently the lack of this standard
causes undue porting and kernel additions by each UNIX
System vendor as well as a great variety of approaches
to gain the same basic performance insight into the
system. Building tools to monitor, display, model, or
predict performance or its trends is a frustrating and
currently single vendor enterprise. By providing
standard data structures, types of performance data
gathered, and a common kernel interface to collect this
data, the whole UNIX system vendor community along with
the UNIX software vendors can develop performance tools
which last more than one UNIX release and work on
multiple UNIX platforms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dujmovic:1994:BRB,
author = "Jozo J. Dujmovi{\'c}",
title = "Book review: {``The Benchmarking Handbook for Database
and Transaction Processing Systems'' Edited by Jim Gray
(Morgan Kaufmann Publishers, Inc., 1991)}",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "4--5",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.1044953",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This book is a short, complete summary of the most
important approaches to performance measurements of
database systems and transaction processing systems. It
is intended to serve as a tutorial for the novice and a
reference for the professional. Included are
contributions by ten authors: Dina Bitton, Rick
Cattell, David DeWitt, Jim Gray, Neal Nelson, Patrick
O'Neil, Tom Sawyer, Omri Serlin, Carolyn Turbyfill, and
Cyril Orji.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Finkel:1994:BRE,
author = "David Finkel",
title = "Book review: {``Encyclopedia of Computer Science'',
Third Edition, edited by Anthony Ralston and Edwin I.
Reilly (Van Nostrand Reinhold, 1993)}",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "6--6",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.1044954",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The new edition of the well-regarded {\em Encyclopedia
of Computer Science\/} is truly impressive. It's over
1500 pages long, with over 700 articles. While some
articles just define a term in a few paragraphs, others
are much more extensive: the article on operating
systems is 25 pages long. There's even a twelve-page
section of full-color illustrations, with the expected
pictures of computer graphics, fractals, and scientific
visualization, as well as an unexpected section of
illustrations of postage stamps dealing with
computing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schieber:1994:RRT,
author = "Colleen D. Schieber and Eric E. Johnson",
title = "{RATCHET}: real-time address trace compression
hardware for extended traces",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "22--32",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.181842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The address traces used in computer architecture
research are commonly generated using software
techniques that introduce time dilations of an order of
magnitude or more. Such techniques may also omit
classes of memory references that are important for
accurate models of computer systems, such as
instruction prefetches, operating system references,
and interrupt activity. We describe a technique for
capturing all classes of references in real time.
RATCHET employs trace filtering hardware to reduce the
bandwidth and storage requirements that have previously
limited the usefulness of hardware-based tracing. In
evaluating this technique using the ten SPEC89
benchmark programs running on a Sun-3/60 workstation,
we found that a small filter cache achieves compression
ratios in the 10--30 range during the startup section
of the programs. Traces from the middle sections of the
C programs achieved compression ratios of 20--30, while
the FORTRAN codes produced ratios of 45--84. Traces
from a smaller ionospheric simulator program were
compressed by factors of 100. These filtered traces
typically represent 36 million contiguous references.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1994:SCQ,
author = "Surendra M. Gupta and Fikri Karaesmen",
title = "Solution to complex queueing systems: a spreadsheet
approach",
journal = j-SIGMETRICS,
volume = "21",
number = "3--4",
pages = "33--46",
month = apr,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/181840.181843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, some very useful and applicable ideas
are presented to facilitate solving complex problems in
Queueing Theory. It is demonstrated how a spreadsheet
can be used to solve problems which many practitioners
find very intimidating. To this end an algorithm is
presented which is particularly designed for easy
implementation in a spreadsheet. A template is provided
illustrating the implementation of the algorithm. The
use of the template is demonstrated in various queueing
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "queueing systems; spreadsheets",
}
@Article{Denning:1994:FLK,
author = "Peter J. Denning",
title = "The fifteenth level (keynote address)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "1--4",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183020",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peris:1994:AIM,
author = "Vinod G. J. Peris and Mark S. Squillante and Vijay K.
Naik",
title = "Analysis of the impact of memory in distributed
parallel processing systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "5--18",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an important tradeoff between processor
and memory allocation in distributed parallel
processing systems. To study this tradeoff, we
formulate stochastic models of parallel program
behavior, distributed parallel processing environments
and memory overheads incurred by parallel programs as a
function of their processor allocation. A mathematical
analysis of the models is developed, which includes the
effects of contention for shared resources caused by
paging activity. We conduct a detailed analysis of real
large-scale scientific applications and use these
results to parameterize our models. Our results show
that memory overhead resulting from processor
allocation decisions can have a significant effect on
system performance in distributed parallel
environments, strongly suggesting that memory
considerations must be incorporated in the resource
allocation policies for parallel systems. We also
demonstrate the importance of the inter-locality miss
ratio, which is introduced in this paper and analyzed
for the first time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McCann:1994:PAP,
author = "Cathy McCann and John Zahorjan",
title = "Processor allocation policies for message-passing
parallel computers",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "19--32",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183022",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When multiple jobs compete for processing resources on
a parallel computer, the operating system kernel's
processor allocation policy determines how many and
which processors to allocate to each. In this paper we
investigate the issues involved in constructing a
processor allocation policy for large scale,
message-passing parallel computers supporting a
scientific workload. We make four specific
contributions: We define the concept of efficiency
preservation as a characteristic of processor
allocation policies. Efficiency preservation is the
degree to which the decisions of the processor
allocator degrade the processor efficiencies
experienced by individual applications relative to
their efficiencies when run alone. We identify the
interplay between the kernel processor allocation
policy and the application load distribution policy as
a determinant of efficiency preservation. We specify
the details of two families of processor allocation
policies, called Equipartition and Folding. Within each
family, different member policies cover a range of
efficiency preservation values, from very high to very
low. By comparing policies within each family as well
as between families, we show that high efficiency
preservation is essential to good performance, and that
efficiency preservation is a more dominant factor in
obtaining good performance than is equality of resource
allocation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiang:1994:UAC,
author = "Su-Hui Chiang and Rajesh K. Mansharamani and Mary K.
Vernon",
title = "Use of application characteristics and limited
preemption for run-to-completion parallel processor
scheduling policies",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "33--44",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance potential of run-to-completion (RTC)
parallel processor scheduling policies is investigated
by examining whether (1) application execution rate
characteristics such as average parallelism (avg) and
processor working set (PWS) and/or (2) limited
preemption can be used to improve the performance of
these policies. We address the first question by
comparing policies (previous as well as new) that
differ only in whether or not they use execution rate
characteristics and by examining a wider range of the
workload parameter space than previous studies. We
address the second question by comparing a simple
two-level queueing policy with RTC scheduling in the
second level queue against RTC policies that don't
allow any preemption and against dynamic
equiallocation (EQ). Using simulation to estimate mean
response times we find that for promising RTC policies
such as adaptive static partitioning (ASP) and shortest
demand first (SDF), a maximum allocation constraint
that is for all practical purposes independent of avg
and pws provides greater and more consistent
improvement in policy performance than using avg or
pws. Also, under the assumption that job demand
information is unavailable to the scheduler we show
that the ASP-max policy outperforms all previous high
performance RTC policies for workloads with coefficient
of variation in processing requirement greater than
one. Furthermore, a two-level queue that allows at most
one preemption per job outperforms ASP-max but is not
competitive with EQ.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolf:1994:SMQ,
author = "Joel L. Wolf and John Turek and Ming-Syan Chen and
Philip S. Yu",
title = "Scheduling multiple queries on a parallel machine",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "45--55",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183024",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been a good deal of progress made recently
towards the efficient parallelization of individual
phases of single queries in multiprocessor database
systems. In this paper we devise and evaluate a number
of scheduling algorithms designed to handle multiple
parallel queries. One of these algorithms emerges as a
clear winner. This algorithm is hierarchical in nature:
In the first phase, a good quality precedence-based
schedule is created for each individual query and each
possible number of processors. This component employs
dynamic programming. In the second phase, the results
of the first phase are used to create an overall
schedule of the full set of queries. This component is
based on previously published work on
nonprecedence-based malleable scheduling. Even though
the problem we are considering is NP-hard in the strong
sense, the multiple query schedules generated by our
hierarchical algorithm are seen experimentally to
achieve results which are close to optimal.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patel:1994:AMH,
author = "Jignesh M. Patel and Michael J. Carey and Mary K.
Vernon",
title = "Accurate modeling of the hybrid hash join algorithm",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "56--66",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183025",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The join of two relations is an important operation in
database systems. It occurs frequently in relational
queries, and join performance is a significant factor
in overall system performance. Cost models for join
algorithms are used by query optimizers to choose
efficient query execution strategies. This paper
presents an efficient analytical model of an important
join method, the hybrid hash join algorithm, that
captures several key features of the algorithm's
performance --- including its intra-operator
parallelism, interference between disk reads and
writes, caching of disk pages, and placement of data on
disk(s). Validation of the model against a detailed
simulation of a database system shows that the response
time estimates produced by the model are quite
accurate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bittan:1994:APB,
author = "Avi Bittan and Yaakov Kogan and Philip S. Yu",
title = "Asymptotic performance of a buffer model in a data
sharing environment",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "67--76",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183026",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a transaction processing system is
very sensitive to the buffer hit probability. In a data
sharing environment where multiple computing nodes are
coupled together with direct access to shared data on
disks, buffer coherency needs to be maintained such
that if a data granule is updated by a node, the old
copies of this granule present in the buffer of other
nodes must be invalidated. The buffer invalidation
phenomenon reduces the buffer hit probability in a
multi-node environment. After the buffer reaches a
certain size, the buffer hit probability will remain
constant regardless of further increase in buffer size
due to the buffer invalidation effect. This puts an
upper limit on the achievable buffer hit probability.
Thus the selection of appropriate buffer size is one of
the critical issues in a data sharing environment. In
this paper, we develop an asymptotic analysis of the
Markov model for a buffer in the data sharing
environment. Important relations between buffer size,
number of nodes, write-probability and the size of the
database to the buffer hit probability have been found
in all ranges of system parameters. A simple expression
is obtained for the maximum achievable buffer hit
probability and also for the maximum usable buffer
size. Various properties of the maximum achievable
buffer hit probability and usable buffer size are
derived for a skewed access workload. The accuracy of
the asymptotic method is validated by numerous case
studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Petriu:1994:AMV,
author = "Dorina C. Petriu",
title = "Approximate mean value analysis of client-server
systems with multi-class requests",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "77--86",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic Rendezvous Networks (SRVNs) are performance
models for multitasking parallel software with
intertask communication via rendezvous introduced in
[1], which are very appropriate to model client-server
systems. SRVNs differ from Queueing Networks (QNs) in
two ways: nodes act as both clients and servers
(allowing for nested service), and servers have two
distinct phases of service --- the first one ``in RV''
with the client, and the second ``after RV'', executed
in parallel with the client. Early work on solving SRVN
models has used a kind of approximate Mean Value
Analysis based on heuristic ad hoc assumptions to
determine the task queue properties at the instant of
RV request arrivals. Approximations are necessary since
SRVN violates product form. Recently, a more rigorous
approach was proposed in [2] for the solution of SRVN
models, based on a special aggregation (named
``Task-Directed Aggregation'' TDA) of the Markov chain
model describing the interference of different clients
that contend for a single server with FIFO queueing
discipline and different service times. The algorithm
derived in [2] has the limitation that each client may
require only a single class of service. In general, a
software server offers a range of services with
different workloads and functionalities, and a client
may need more than one service. The present paper uses
the TDA approach to derive an extended algorithm which
allows a client to require any number of services from
a server by changing randomly the request class. The
new algorithm is incorporated into a decomposition
method for models with any number of servers. The SRVN
modelling technique is applied to a large case study of
a distributed database system, giving insight into the
behaviour of the system and helping to identify
performance problems such as software bottle-neck.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balbo:1994:ATP,
author = "G. Balbo and S. C. Bruell and M. Sereno",
title = "Arrival theorems for product-form stochastic {Petri}
nets",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "87--97",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a particular class of Stochastic Petri
Nets whose stationary probabilities at arbitrary
instants exhibit a product form. We study these nets at
specific instants in the steady state that occur
directly after the firing of a transition. We focus our
attention on the instant after tokens are removed from
the places specified by a transition's input bag and
just before tokens are entered into the places
specified by the same transition's output bag. We show
that the stationary probabilities at ``arrival
instants'' are related to corresponding stationary
probabilities at arbitrary instants in net(s) with
lower load. We then show how one of the ``arrival''
theorems can be applied to the derivation of a formula
for the mean sojourn time of a token in a place at
steady state. This is the basis for the development of
a Mean Value Analysis algorithm for the computation of
performance indices for Product-Form Stochastic Petri
Nets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Somani:1994:PMS,
author = "Arun K. Somani and Kishor S. Trivedi",
title = "Phased-mission system analysis using {Boolean}
algebraic methods",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "98--107",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most reliability analysis techniques and tools assume
that a system is used for a mission consisting of a
single phase. However, multiple phases are natural in
many missions. The failure rates of components, system
configuration, and success criteria may vary from phase
to phase. In addition, the duration of a phase may be
deterministic or random. Recently, several researchers
have addressed the problem of reliability analysis of
such systems using a variety of methods. We describe a
new technique for phased-mission system reliability
analysis based on Boolean algebraic methods. Our
technique is computationally efficient and is
applicable to a large class of systems for which the
failure criterion in each phase can be expressed as a
fault tree (or an equivalent representation). Our
technique avoids state space explosion that commonly
plagues Markov chain-based analysis. We develop a phase
algebra to account for the effects of variable
configurations and success criteria from phase to
phase. Our technique yields exact (as opposed to
approximate) results. We demonstrate the use of our
technique by means of an example and present numerical
results to show the effects of mission phases on the
system reliability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Boolean algebraic methods; fault trees; phased-mission
systems; random phase duration; reconfiguration;
reliability analysis; ultra-reliable computer system;
variable success criteria",
}
@Article{Ebling:1994:SEF,
author = "Maria R. Ebling and M. Satyanarayanan",
title = "{SynRGen}: an extensible file reference generator",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "108--117",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SynRGen, a synthetic file reference generator
operating at the system call level, is capable of
modeling a wide variety of usage environments. It
achieves realism through trace-inspired micromodels and
flexibility by combining these micromodels
stochastically. A micromodel is a parameterized piece
of code that captures the distinctive signature of an
application. We have used SynRGen extensively for
stress testing the Coda File System. We have also
performed a controlled experiment that demonstrates
SynRGen's ability to closely emulate real users ---
within 20\% of many key system variables. In this paper
we present the rationale, detailed design, and
evaluation of SynRGen, and mention its applicability to
broader uses such as performance evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raghavan:1994:GNM,
author = "S. V. Raghavan and D. Vasukiammaiyar and Gunter
Haring",
title = "Generative networkload models for a single server
environment",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "118--127",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183031",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Any performance evaluation study requires a concise
description of the workload under which the performance
of the system is to be evaluated. Also, the
repeatability of the experiments for different workload
profiles, requires that the workload models generate
the workload profiles parametrically. Such a model,
should preferably be time-invariant, consistent and
generative. We view the networkload as a sequence that
can be generated from the rules of a Context Free
Grammar (CFG). Our approach combines the established
practice of viewing the workload as ``consisting of a
hierarchy'' and the CFG description, to produce a
generative networkload model. The networkload model is
applied to a SingleServer--MultipleClients network by
deriving the networkload model parameters from an
operational SingleServer network of personal computers.
The time-invariance and generative nature are verified
experimentally. The usefulness of such a description of
the networkload to study the resource management
problems of a network, like the optimal allocation of
clients to servers, is explored by using the generative
model as input descriptor to a queueing network model
of SingleServer network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cmelik:1994:SFI,
author = "Bob Cmelik and David Keppel",
title = "{Shade}: a fast instruction-set simulator for
execution profiling",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "128--137",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Tracing tools are used widely to help analyze, design,
and tune both hardware and software systems. This paper
describes a tool called Shade which combines efficient
instruction-set simulation with a flexible, extensible
trace generation capability. Efficiency is achieved by
dynamically compiling and caching code to simulate and
trace the application program. The user may control the
extent of tracing in a variety of ways; arbitrarily
detailed application state information may be collected
during the simulation, but tracing less translates
directly into greater efficiency. Current Shade
implementations run on SPARC systems and simulate the
SPARC (Versions 8 and 9) and MIPS I instruction sets.
This paper describes the capabilities, design,
implementation, and performance of Shade, and discusses
instruction set emulation in general.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Noble:1994:ESH,
author = "Brian D. Noble and M. Satyanarayanan",
title = "An empirical study of a highly available file system",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "138--149",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present results from a six-month
empirical study of the high availability aspects of the
Coda File System. We report on the service failures
experienced by Coda clients, and show that such
failures are masked successfully. We also explore the
effectiveness and resource costs of key aspects of
server replication and disconnected operation, the two
high availability mechanisms of Coda. Wherever
possible, we compare our measurements to
simulation-based predictions from earlier papers and to
anecdotal evidence from users. Finally, we explore how
users take advantage of the support provided by Coda
for mobile computing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dahlin:1994:QAC,
author = "Michael D. Dahlin and Clifford J. Mather and Randolph
Y. Wang and Thomas E. Anderson and David A. Patterson",
title = "A quantitative analysis of cache policies for scalable
network file systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "150--160",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current network file system protocols rely heavily on
a central server to coordinate file activity among
client workstations. This central server can become a
bottleneck that limits scalability for environments
with large numbers of clients. In central server
systems such as NFS and AFS, all client writes, cache
misses, and coherence messages are handled by the
server. To keep up with this workload, expensive server
machines are needed, configured with high-performance
CPUs, memory systems, and I/O channels. Since the
server stores all data, it must be physically capable
of connecting to many disks. This reliance on a central
server also makes current systems inappropriate for
wide area network use where the network bandwidth to
the server may be limited. In this paper, we investigate
the quantitative performance effect of moving as many
of the server responsibilities as possible to client
workstations to reduce the need for high-performance
server machines. We have devised a cache protocol in
which all data reside on clients and all data transfers
proceed directly from client to client. The server is
used only to coordinate these data transfers. This
protocol is being incorporated as part of our
experimental file system, xFS. We present results from
a trace-driven simulation study of the protocol using
traces from a 237 client NFS installation. We find that
the xFS protocol reduces server load by more than a
factor of six compared to AFS without significantly
affecting response time or file availability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kotz:1994:ELS,
author = "David Kotz and Preston Crow",
title = "The expected lifetime of ``single-address-space''
operating systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "161--170",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trends toward shared-memory programming paradigms,
large (64-bit) address spaces, and memory-mapped files
have led some to propose the use of a single
virtual-address space, shared by all processes and
processors. Typical proposals require the single
address space to contain all process-private data,
shared data, and stored files. To simplify management
of an address space where stable pointers make it
difficult to re-use addresses, some have claimed that a
64-bit address space is sufficiently large that there
is no need to ever re-use addresses. Unfortunately,
there has been no data to either support or refute
these claims, or to aid in the design of appropriate
address-space management policies. In this paper, we
present the results of extensive kernel-level tracing
of the workstations in our department, and discuss the
implications for single-address-space operating
systems. We found that single-address-space systems
will not outgrow the available address space, but only
if reasonable space-allocation policies are used, and
only if the system can adapt as larger address space
becomes available.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sivasubramaniam:1994:ASS,
author = "Anand Sivasubramaniam and Aman Singla and Umakishore
Ramachandran and H. Venkateswaran",
title = "An approach to scalability study of shared memory
parallel systems",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "171--180",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The overheads in a parallel system that limit its
scalability need to be identified and separated in
order to enable parallel algorithm design and the
development of parallel machines. Such overheads may be
broadly classified into two components. The first one
is intrinsic to the algorithm and arises due to factors
such as the work-imbalance and the serial fraction. The
second one is due to the interaction between the
algorithm and the architecture and arises due to
latency and contention in the network. A top-down
approach to scalability study of shared memory parallel
systems is proposed in this research. We define the
notion of overhead functions associated with the
different algorithmic and architectural characteristics
to quantify the scalability of parallel systems; we
isolate the algorithmic overhead and the overheads due
to network latency and contention from the overall
execution time of an application; we design and
implement an execution-driven simulation platform that
incorporates these methods for quantifying the overhead
functions; and we use this simulator to study the
scalability characteristics of five applications on
shared memory platforms with different communication
topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mehra:1994:CTM,
author = "Pankaj Mehra and Catherine H. Schulbach and Jerry C.
Yan",
title = "A comparison of two model-based performance-prediction
techniques for message-passing parallel programs",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "181--190",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes our experience in modeling two
significant parallel applications: ARC2D, a
2-dimensional Euler solver; and, Xtrid, a tridiagonal
linear solver. Both of these models were expressed in
BDL (Behavior Description language) and simulated on an
iPSC/860 Hypercube modeled using Axe (Abstract
eXecution Environment). BDL models consist of abstract
communicating objects: blocks of sequential code are
modeled by single RUN statements; all communication
operations in the original code are mirrored by
corresponding BDL operations in the model. Our ARC2D
model was built by first profiling the program to
locate the significant loops and then timing the basic
blocks within those loops. Simulated completion times
were (except in one case) within 8\% of measured
execution times. Lengthy simulations were necessary for
predicting the performance of large-scale runs. For
Xtrid, only the loops surrounding communications were
modeled; other loops were absorbed into large
sequential blocks whose complexity was estimated using
statistical regression. This approach yielded a much
smaller model whose computation and communication
complexities were clearly manifest. Analysis of
complexity allowed rapid prediction of large-scale
performance without lengthy simulations! Analytically
predicted speed-ups were within 7\% of those predicted
by simulation. Simulated completion times were within
5\% of measured execution times. The second approach
provides a more effective methodology for
simulation-based performance-tuning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Horton:1994:MLS,
author = "Graham Horton and Scott T. Leutenegger",
title = "A multi-level solution algorithm for steady-state
{Markov} chains",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "191--200",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new iterative algorithm, the multi-level algorithm,
for the numerical solution of steady state Markov
chains is presented. The method utilizes a set of
recursively coarsened representations of the original
system to achieve accelerated convergence. It is
motivated by multigrid methods, which are widely used
for fast solution of partial differential equations.
Initial results of numerical experiments are reported,
showing significant reductions in computation time,
often an order of magnitude or more, relative to the
Gauss--Seidel and optimal SOR algorithms for a variety
of test problems. It is shown how the well-known
iterative aggregation-disaggregation algorithm of
Takahashi can be interpreted as a special case of the
new method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Das:1994:AMM,
author = "Samir R. Das and Richard M. Fujimoto",
title = "An adaptive memory management protocol for {Time Warp}
parallel simulation",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "201--210",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is widely believed that Time Warp is prone to two
potential problems: an excessive amount of wasted,
rolled back computation resulting from ``rollback
thrashing'' behaviors, and inefficient use of memory,
leading to poor performance of virtual memory and/or
multiprocessor cache systems. An adaptive mechanism is
proposed based on the Cancelback memory management
protocol that dynamically controls the amount of memory
used in the simulation in order to maximize
performance. The proposed mechanism is adaptive in the
sense that it monitors the execution of the Time Warp
program, automatically adjusts the amount of memory
used to reduce Time Warp overheads (fossil collection,
Cancelback, the amount of rolled back computation,
etc.) to a manageable level. The mechanism is based on
a model that characterizes the behavior of Time Warp
programs in terms of the flow of memory buffers among
different buffer pools. We demonstrate that an
implementation of the adaptive mechanism on a Kendall
Square Research KSR-1 multiprocessor is effective in
automatically maximizing performance while minimizing
memory utilization of Time Warp programs, even for
dynamically changing simulation models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:1994:PEE,
author = "Hui Zhang and Edward W. Knightly",
title = "Providing end-to-end statistical performance
guarantees with bounding interval dependent stochastic
models",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "211--220",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper demonstrates a new, efficient, and general
approach for providing end-to-end performance
guarantees in integrated services networks. This is
achieved by modeling a traffic source with a family of
bounding interval-dependent (BIND) random variables and
by using a rate-controlled service discipline inside
the network. The traffic model stochastically bounds
the number of bits sent over time intervals of
different length. The model captures different source
behavior over different time scales by making the
bounding distribution an explicit function of the
interval length. The service discipline, RCSP, has the
priority queueing mechanisms necessary to provide
performance guarantees in integrated services networks.
In addition, RCSP provides the means for efficiently
extending the results from a single switch to a network
of arbitrary topology. These techniques are derived
analytically and then demonstrated with numerical
examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pingali:1994:CSI,
author = "Sridhar Pingali and Don Towsley and James F. Kurose",
title = "A comparison of sender-initiated and
receiver-initiated reliable multicast protocols",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "221--230",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sender-initiated reliable multicast protocols, based
on the use of positive acknowledgments (ACKs), lead to
an ACK implosion problem at the sender as the number of
receivers increases. Briefly, the ACK implosion problem
refers to the significant overhead incurred by the
sending host due to the processing of ACKs from each
receiver. A potential solution to this problem is to
shift the burden of providing reliable data transfer to
the receivers --- thus resulting in a
receiver-initiated multicast error control protocol
based on the use of negative acknowledgments (NAKs). In
this paper we determine the maximum throughputs of the
sending and receiving hosts for generic
sender-initiated and receiver-initiated protocols. We
show that the receiver-initiated error control
protocols provide substantially higher throughputs than
their sender-initiated counterparts. We further
demonstrate that the introduction of random delays
prior to generating NAKs coupled with the multicasting
of NAKs to all receivers has the potential for an
additional substantial increase in the throughput of
receiver-initiated error control protocols over
sender-initiated protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nikolaidis:1994:TPS,
author = "Ioanis Nikolaidis and Richard Fujimoto and C. Anthony
Cooper",
title = "Time-parallel simulation of cascaded statistical
multiplexers",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "231--240",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The multiplexing of several lightly loaded links onto
a more heavily loaded output link is a problem of
considerable importance to the design and traffic
engineering of many types of packet-oriented
telecommunications equipment, including that used in
Asynchronous Transfer Mode (ATM) networks. Network
configurations generally require the cascaded operation
of such multiplexers and switches. Important objectives
are to achieve small cell loss ratios while maintaining
efficient utilization of the transmission links. The
small cell loss ratio objective results in extremely
long simulation runs. To address this problem, we
propose a new technique that relies on a compact
description for the arriving/departing traffic at the
multiplexers and a time-parallel scheme without fix-up
phases for effective parallelization. The technique
does not make assumptions about the analytical nature
of the arrival process, thereby allowing trace-driven
simulations to be performed as well. We demonstrate the
method for a number of configurations and traffic
scenarios, and observe that it yields one to two orders
of magnitude speedup on a 32 processor Kendall Square
Research KSR-1 multiprocessor compared to an efficient
cell-level simulation executing on a Sparc-10
workstation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Worthington:1994:SAM,
author = "Bruce L. Worthington and Gregory R. Ganger and Yale N.
Patt",
title = "Scheduling algorithms for modern disk drives",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "241--251",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183045",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Disk subsystem performance can be dramatically
improved by dynamically ordering, or scheduling,
pending requests. Via strongly validated simulation, we
examine the impact of complex logical-to-physical
mappings and large prefetching caches on scheduling
effectiveness. Using both synthetic workloads and
traces captured from six different user environments,
we arrive at three main conclusions: (1) Incorporating
complex mapping information into the scheduler provides
only a marginal (less than 2\%) decrease in response
times for seek-reducing algorithms. (2) Algorithms
which effectively utilize prefetching disk caches
provide significant performance improvements for
workloads with read sequentiality. The cyclical scan
algorithm (C-LOOK), which always schedules requests in
ascending logical order, achieves the highest
performance among seek-reducing algorithms for such
workloads. (3) Algorithms that reduce overall
positioning delays produce the highest performance
provided that they recognize and exploit a prefetching
cache.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:1994:OMC,
author = "David M. Nicol and Shahid H. Bokhari",
title = "Optimal multiphase complete exchange on
circuit-switched hypercube architectures",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "252--260",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complete-exchange communication primitive on a
distributed memory multiprocessor calls for every
processor to send a message to every other processor,
each such message being unique. For circuit-switched
hypercube networks there are two well-known schemes for
implementing this primitive. Direct exchange minimizes
communication volume but maximizes startup costs, while
Standard Exchange minimizes startup costs at the price
of higher communication volume. This paper analyzes a
hybrid, which can be thought of as a sequence of Direct
Exchange phases, applied to variable-sized subcubes.
This paper examines the problem of determining the
optimal subcube dimension sizes $ d_i $ for every
phase. We show that optimal performance is achieved
using some equi-partition, where $ |d_i - d_j| \leq 1 $
for all phases $i$ and $j$. We study the behavior of
the optimal partition as a function of machine
communication parameters, hypercube dimension, and
message size, and show that the optimal partition can
be determined with no more than $ 2 d + 1$ comparisons.
Finally we validate the model empirically, and for
certain problem instances observe as much as a factor
of two improvement over the other methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Temam:1994:CIP,
author = "O. Temam and C. Fricker and W. Jalby",
title = "Cache interference phenomena",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "261--271",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The impact of cache interferences on program
performance (particularly numerical codes, which
heavily use the memory hierarchy) remains unknown. The
general knowledge is that cache interferences are
highly irregular, in terms of occurrence and intensity.
In this paper, the different types of cache
interferences that can occur in numerical loop nests
are identified. An analytical method is developed for
detecting the occurrence of interferences and, more
important, for computing the number of cache misses due
to interferences. Simulations and experiments on real
machines show that the model is generally accurate and
that most interference phenomena are captured.
Experiments also show that cache interferences can be
intense and frequent. Certain parameters such as array
base addresses or dimensions can have a strong impact
on the occurrence of interferences. Modifying these
parameters only can induce global execution time
variations of 30\% and more. Applications of these
modeling techniques are numerous and range from
performance evaluation and prediction to enhancement of
data locality optimizations techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache interferences or conflicts; data locality;
modeling; numerical codes; performance evaluation",
}
@Article{Danskin:1994:PXP,
author = "John Danskin and Pat Hanrahan",
title = "Profiling the {X} protocol (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "272--273",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Drapeau:1994:TWC,
author = "Ann L. Drapeau and David A. Patterson and Randy H.
Katz",
title = "Toward workload characterization of video server and
digital library applications (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "274--275",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183049",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gill:1994:CSF,
author = "Deepinder S. Gill and Songnian Zhou and Harjinder S.
Sandhu",
title = "A case study of file system workload in a large-scale
distributed environment",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "276--277",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:1994:CTD,
author = "Joseph L. Hellerstein",
title = "A comparison of techniques for diagnosing performance
problems in information systems (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "278--279",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183051",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1994:EUL,
author = "J. William Lee",
title = "Efficient user-level communication on multicomputers
with an optimistic flow-control protocol (extended
abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "280--281",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rolia:1994:MRP,
author = "J. A. Rolia and M. Starkey and G. Boersma",
title = "Modeling {RPC} performance",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "282--283",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183053",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed computing applications are collections of
processes allocated across a network that cooperate to
accomplish common goals. The applications require the
support of a distributed computing runtime environment
that provides services to help manage process
concurrency and interprocess communication. This
support helps to hide much of the inherent complexity
of distributed environments via industry standard
interfaces and permits developers to create more
portable applications. The resource requirements of the
runtime services can be significant and may impact
application performance and system throughput. This
paper describes work done to study the potential
benefits of redesigning some aspects of the DCE RPC and
its current implementation on a specific platform.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tayyab:1994:SPM,
author = "Athar B. Tayyab and Jon G. Kuhl",
title = "Stochastic performance models of parallel task systems
(extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "284--285",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183019.183054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers the class of parallel
computations represented by directed, acyclic task
graphs. These include parallel loops, multiphase
algorithms, partitioning and merging algorithms, as
well as any arbitrary parallel computation that can be
structured by a task graph. The paper reviews the
current state of the art in stochastic bound models of
parallel programs and presents new stochastic bound
performance models that predict the expected execution
time of parallel programs on a given shared-memory
multiprocessor system; and provide qualitative and
quantitative description of the relationships between
the structure of parallel programs, computation and
synchronization behavior of the program, and
architectural features of the underlying multiprocessor
system. The models use a new formulation based on
stochastic bound analysis and are solvable for a number
of distribution functions. They are applicable to
shared-memory multiprocessors with significantly
different architectural and synchronization performance
characteristics. The accuracy of the models is
validated via several measurements on two different
shared-memory multiprocessor systems, the Alliant
FX/2800 and the Encore Multimax. The results show the
models to be quite accurate, even when some of the
modeling assumptions are violated. The maximum error of
prediction ranges from about 10\% to under 1\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Uhlig:1994:KBM,
author = "Richard Uhlig and David Nagle and Trevor Mudge and
Stuart Sechrest",
title = "Kernel-based memory simulation (extended abstract)",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "286--287",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183056",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wabnig:1994:PPP,
author = "Harald Wabnig and G{\"u}nter Haring",
title = "Performance prediction of parallel systems with
scalable specifications --- methodology and case
study",
journal = j-SIGMETRICS,
volume = "22",
number = "1",
pages = "288--289",
month = may,
year = "1994",
CODEN = "????",
DOI = "https://doi.org/10.1145/183018.183057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:16:44 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lavenberg:1995:SPS,
author = "S. S. Lavenberg",
title = "Selected publications of the {Systems Analysis and
Systems Applications} department of the {IBM T. J.
Watson Research Center}",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "6--17",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202101",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shanley:1995:TDM,
author = "Kim Shanley and Tracy Derossett",
title = "{TPC-D} measures how quickly real-world business
questions can be answered",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "18--45",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202102",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wabnig:1995:PPP,
author = "H. Wabnig and G. Haring",
title = "Performance prediction of parallel systems with
scalable specifications --- methodology and case
study",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "46--62",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202103",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the general methodology of
specifying parallel systems within the PAPS
(Performance Analysis of Parallel Systems) toolset and
presents a case study that shows the applicability and
accuracy of the Petri net based performance prediction
tools contained in the toolset. Parallel systems are
specified in the PAPS toolset by separately defining
the program workload, the hardware resources, and the
mapping of the program to the hardware. The resource
parameterization is described in detail for a
multiprocessor computer with a store {\&} forward
communication network. The Gaussian elimination
algorithm is taken as a workload example to demonstrate
how regularly structured parallel algorithms are
modelled with acyclic task graphs. Three different
program specifications with various levels of model
accuracy are developed and their parameterization is
described. The predicted execution time is compared
with the measured execution times of the real program
on the parallel hardware. It is shown that the Petri
net based performance prediction tools provide accurate
performance predictions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:1995:QMS,
author = "Surendra M. Gupta",
title = "Queueing model with state dependent balking and
reneging: its complementary and equivalence",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "63--72",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202104",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, the concepts of complementarity and
equivalence between an {\em M/M/c/K\/} queueing model
with state dependent balking and reneging and a machine
interference problem with warm standbys are formalized.
The relationship provides insight into these queueing
systems. Through a series of corollaries, relationships
between various queueing systems are derived. It is
shown that a recently reported relationship between
Erlang loss system and a finite source queueing system
is a trivial consequence of the more general results
presented here. New results involving the arrival point
probabilities and measures of performance for these two
queueing systems are also presented. An example is also
provided.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keehn:1995:VPF,
author = "D. G. Keehn",
title = "Visualizing performance in the frequency plane",
journal = j-SIGMETRICS,
volume = "22",
number = "2--4",
pages = "73--81",
month = apr,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/202100.202105",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:43 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A method of showing the performance limiting effects
of a product form queueing network as lines, planes,
etc in a $J$ dimensional space is given. The location
of a certain critical point (Little's Law Point) in
this space allows the asymptotic calculation of the
normalizing constant G(K) of the network. This Little's
Law point (LLP) is found by applying Little's Law to
the augmented system generating function of the BCMP
[1] network. The computational complexity of this
algorithm is the Order (number of chains cubed * number
of service centers in the system). Comparisons of
numerical accuracy with other methods (Convolution, and
another asymptotic method) are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chapin:1995:MSP,
author = "John Chapin and Stephen A. Herrod and Mendel Rosenblum and
Anoop Gupta",
title = "Memory system performance of {UNIX} on {CC-NUMA}
multiprocessors",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "1--13",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This study characterizes the performance of a variant
of UNIX SVR4 on a large shared-memory multiprocessor
and analyzes the effects of possible OS and
architectural changes. We use a nonintrusive cache miss
monitor to trace the execution of an OS-intensive
multiprogrammed workload on the Stanford DASH, a 32-CPU
CC-NUMA multiprocessor (CC-NUMA multiprocessors have
cache-coherent shared memory that is physically
distributed across the machine). We find that our
version of UNIX accounts for 24\% of the workload's
total execution time. A surprisingly large fraction of
OS time (79\%) is spent on memory system stalls,
divided equally between instruction and data cache miss
time. In analyzing techniques to reduce instruction
cache miss stall time, we find that replication of only
7\% of the OS code would allow 80\% of instruction
cache misses to be serviced locally on a CC-NUMA
machine. For data cache misses, we find that a small
number of routines account for 96\% of OS data cache
stall time. We find that most of these misses are
coherence (communication) misses, and larger caches
will not necessarily help. After presenting detailed
performance data, we analyze the benefits of several OS
changes and predict the effects of altering the cache
configuration, degree of clustering, and cache
coherence mechanism of the machine. (This paper is
available via \url{http://wwwflash.stanford.edu}.)",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bedichek:1995:TFA,
author = "Robert C. Bedichek",
title = "{Talisman}: fast and accurate multicomputer
simulation",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "14--24",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223589",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Talisman is a simulator that models the execution
semantics and timing of a multicomputer. Talisman is
unique in combining high semantic accuracy, high timing
accuracy, portability, {\em and\/} good performance.
This good performance allows users to run significant
programs on large simulated multicomputers. The
combination of high accuracy and good performance
yields an ideal tool for evaluating architectural
trade-offs. Talisman models the semantics of virtual
memory, a circuit-switched internode interconnect, I/O
devices, and instruction execution in both user and
supervisor modes. It also models the timing of
processor pipelines, caches, local memory buses, and a
circuit-switched interconnect. Talisman executes the
same program binary images as a hardware prototype at a
cost of about 100 host instructions per simulated
instruction. On a suite of accuracy benchmarks run on
the hardware and the simulator, Talisman and the
prototype differ in reported running times by only a
few percent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:1995:RDV,
author = "Leana Golubchik and John C. S. Lui and Richard Muntz",
title = "Reducing {I/O} demand in video-on-demand storage
servers",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "25--36",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223590",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent technological advances have made multimedia
on-demand services, such as home entertainment and
home-shopping, important to the consumer market. One of
the most challenging aspects of this type of service is
providing access either instantaneously or within a
small and reasonable latency upon request. In this
paper, we discuss a novel approach, termed adaptive
piggybacking, which can be used to provide on-demand or
nearly-on-demand service and at the same time reduce
the I/O demand on the multimedia storage server.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghandeharizadeh:1995:CSD,
author = "Shahram Ghandeharizadeh and Seon Ho Kim and Cyrus
Shahabi",
title = "On configuring a single disk continuous media server",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "37--46",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223591",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The past decade has witnessed a proliferation of
repositories that store and retrieve continuous media
data types, e.g., audio and video objects. These
repositories are expected to play a major role in
several emerging applications, e.g., library
information systems, educational applications,
entertainment industry, etc. To support the display of
a video object, the system partitions each object into
fixed size blocks. All blocks of an object reside
permanently on the disk drive. When displaying an
object, the system stages the blocks of the object into
memory one at a time for immediate display. In the
presence of multiple displays referencing different
objects, the bandwidth of the disk drive is multiplexed
among requests, introducing disk seeks. Disk seeks
reduce the useful utilization of the disk bandwidth and
result in a lower number of simultaneous displays
(throughput). This paper characterizes the impact of
disk seeks on the throughput of the system. It
describes REBECA as a mechanism that maximizes the
throughput of the system by minimizing the time
attributed to each incurred seek. A limitation of
REBECA is that it increases the latency observed by
each request. We quantify this throughput vs latency
tradeoff of REBECA and develop an efficient technique
that computes its configuration parameters to realize
the performance requirements (desired latency and
throughput) of an application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krunz:1995:TMC,
author = "Marwan Krunz and Herman Hughes",
title = "A traffic model for {MPEG}-coded {VBR} streams",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "47--55",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223592",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Compression of digital video is the only viable means
to transport real-time full-motion video over BISDN/ATM
networks. Traffic streams generated by video
compressors exhibit complicated patterns which vary
from one compression scheme to another. In this paper
we investigate the traffic characteristics of video
streams which are compressed based on the MPEG
standard. Our study is based on 23 minutes of video
obtained from an entertainment movie. A particular
significance of our data is that it contains all types
of coded frames, namely: Intra-coded (I), Prediction
(P), and Bidirectional (B) MPEG frames. We describe the
statistical behavior of the VBR stream using histograms
and autocorrelation functions. A procedure is developed
to determine the instants of a scene change based on
the changes in the size of successive $I$ frames. It is
found that the length of a scene can be modeled by a
geometric distribution. A model for an MPEG traffic
source is developed in which frames are generated
according to the compression pattern of the captured
video stream. For each frame type, the number of cells
per frame is fitted by a lognormal distribution whose
parameters are determined by the frame type. The
appropriateness and limitations of the model are
examined by studying the multiplexing performance of
MPEG streams. Simulations of an ATM multiplexer are
conducted, in which traffic sources are derived from
the measured VBR trace as well as the proposed model.
The queueing performance in both cases is found to be
relatively close.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:1995:NTM,
author = "Carey L. Williamson",
title = "Network traffic measurement and modeling",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "56--57",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223593",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network traffic measurement and workload
characterization are key steps in the workload modeling
process. Much has been learned through network
measurement and workload modeling in the last ten
years, but new challenges are now at the forefront:
measuring network traffic in the Internet environment,
understanding the implications of network traffic
structure (e.g., self-similarity, autocorrelation, long
range dependence), and accurate modeling of network
traffic workloads for high speed network environments.
This `hot topic' session brings together three
prominent speakers to address each of these topics, in
turn.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gelenbe:1995:GNN,
author = "Erol Gelenbe",
title = "{G}-networks: new queueing models with additional
control capabilities",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "58--59",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.376966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This Hot-Topics Session on G-Networks aims at bringing
these relatively new models which we introduced for the
first time in 1989 and 1990, to the attention of the
performance evaluation and modeling community. The
session includes presentations by Peter Harrison, Onno
Boxma, Jean-Michel Fourneau and myself. We will cover
the basic concepts, some examples of potential
applications, as well as recent research efforts in
this area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tridandapani:1995:FPF,
author = "Srini Tridandapani and Anton T. Dahbura and Charles U.
Martel and John Matthews and Arun K. Somani",
title = "Free performance and fault tolerance (extended
abstract): using system idle capacity efficiently",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "60--61",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223594",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Malony:1995:DIE,
author = "Allen D. Malony",
title = "Data interpretation and experiment planning in
performance tools",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "62--63",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223595",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The parallel scientific computing community is placing
increasing emphasis on portability and scalability of
programs, languages, and architectures. This creates
new challenges for developers of parallel performance
analysis tools, who will have to deal with increasing
volumes of performance data drawn from diverse
platforms. One way to meet this challenge is to
incorporate sophisticated facilities for data
interpretation and experiment planning within the tools
themselves, giving them increased flexibility and
autonomy in gathering and selecting performance data.
This panel discussion brings together four research
groups that have made advances in this direction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vaidya:1995:CTL,
author = "Nitin H. Vaidya",
title = "A case for two-level distributed recovery schemes",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "64--73",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223596",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most distributed and multiprocessor recovery schemes
proposed in the literature are designed to tolerate
arbitrary number of failures. In this paper, we
demonstrate that, it is often advantageous to use
`two-level' recovery schemes. A {\em two-level\/}
recovery scheme tolerates the {\em more probable\/}
failures with low performance overhead, while the less
probable failures may be tolerated with a higher
overhead. By minimizing the overhead for the more
frequently occurring failure scenarios, our approach is
expected to achieve lower performance overhead (on
average) as compared to existing recovery schemes. To
demonstrate the advantages of two-level recovery, we
evaluate the performance of a recovery scheme that
takes two different types of checkpoints, namely,
1-checkpoints and $N$-checkpoints. A single failure can
be tolerated by rolling the system back to a
1-checkpoint, while multiple failure recovery is
possible by rolling back to an $N$-checkpoint. For such
a system, we demonstrate that to minimize the average
overhead, it is often necessary to take {\em both\/}
1-checkpoints and $N$-checkpoints. While the
conclusions of this paper are intuitive, the work on
design of appropriate recovery schemes is lacking. The
objective of this paper is to motivate research into
recovery schemes that can provide multiple levels of
fault tolerance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epema:1995:ADU,
author = "D. H. J. Epema",
title = "An analysis of decay-usage scheduling in
multiprocessors",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "74--85",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Priority-aging or decay-usage scheduling is a
time-sharing scheduling policy capable of dealing with
a workload of both interactive and batch jobs by
decreasing the priority of a job when it acquires CPU
time, and by increasing its priority when it does not
use the (a) CPU. In this paper we deal with a
decay-usage scheduling policy in multiprocessor systems
modeled after widely used systems. The priority of a
job consists of a base priority and a time-dependent
part based on processor usage. Because the priorities
in our model are time dependent, a queueing-theoretic
analysis, for instance for the mean response time,
seems impossible. Still, it turns out that as a
consequence of the scheduling policy, the shares of
available CPU time obtained by jobs converge, and a
deterministic analysis for these shares is feasible:
for a fixed set of jobs with very large (infinite)
processing demands, we derive the relation between
their base priorities and their steady-state shares. In
addition, we analyze the relation between the values of
the parameters of the scheduler and the level of
control it can exercise over the steady-state shares.
We validate the model by simulations and by
measurements of actual systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elwalid:1995:FRP,
author = "Anwar Elwalid and Daniel Heyman and T. V. Lakshman and
Debasis Mitra and Alan Weiss",
title = "Fundamental results on the performance of {ATM}
multiplexers with applications to video
teleconferencing",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "86--97",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223598",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The main contributions of this paper are two-fold.
First, we prove fundamental, similarly behaving lower
and upper bounds, and give an approximation based on
the bounds, which is effective for analyzing ATM
multiplexers, even when the traffic has many, possibly
heterogeneous, sources and their models are of high
dimension. Second, we apply our analytic approximation
to statistical models of video teleconference traffic,
obtain the multiplexing system's capacity as determined
by the number of admissible sources for given cell loss
probability, buffer size and trunk bandwidth, and,
finally, compare with results from simulations, which
are driven by actual data from coders. The results are
surprisingly close. Our bounds are based on Large
Deviations theory. Our approximation has two easily
calculated parameters, one is from Chernoff's theorem
and the other is the system's dominant eigenvalue. A
broad range of systems are analyzed and the time for
analysis in each case is a fraction of a second.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Knightly:1995:FLT,
author = "Edward W. Knightly and Dallas E. Wrege and J{\"o}rg
Liebeherr and Hui Zhang",
title = "Fundamental limits and tradeoffs of providing
deterministic guarantees to {VBR} video traffic",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "98--107",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223599",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Compressed digital video is one of the most important
traffic types in future integrated services networks.
However, a network service that supports
delay-sensitive video imposes many problems since
compressed video sources are variable bit rate (VBR)
with a high degree of burstiness. In this paper, we
consider a network service that can provide
deterministic guarantees on the minimum throughput and
the maximum delay of VBR video traffic. A common belief
is that due to the burstiness of VBR traffic, such a
service will not be efficient and will necessarily
result in low network utilization. We investigate the
fundamental limits and tradeoffs in providing
deterministic performance guarantees to video and use a
set of 10 to 90 minute long MPEG-compressed video
traces for evaluation. Contrary to conventional wisdom,
we are able to show that, in many cases, a
deterministic service can be provided to video traffic
while maintaining a reasonable level of network
utilization. We first consider an ideal network
environment that employs the most accurate
deterministic, time-invariant video traffic
characterizations, Earliest-Deadline-First packet
schedulers, and exact admission control conditions. The
utilization achievable in this situation provides the
fundamental limits of a deterministic service. We then
investigate the utilization limits in a network
environment that takes into account practical
constraints, such as the need for fast policing
mechanisms, simple packet scheduling algorithms, and
efficient admission control tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fang:1995:EBW,
author = "Youjian Fang and Michael Devetsikiotis and Ioannis
Lambadaris and A. Roger Kaye",
title = "Exponential bounds for the waiting time distribution
in {Markovian} queues, with applications to {TES/GI/1}
systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "108--115",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several services to be supported by emerging
high-speed networks are expected to result in highly
{\em bursty\/} (autocorrelated) traffic streams. A
typical example is variable bit-rate (VBR) compressed
video. Therefore, traffic modeling and performance
evaluation techniques geared towards autocorrelated
streams are extremely important for the design of
practical networks. The {\em TES\/} (Transform ---
Expand --- Sample) technique has emerged as a general
methodology for modeling autocorrelated random
processes with arbitrary marginal distributions.
Because of their generality and practical
applicability, TES models can be readily used to
accurately characterize bursty traffic streams in ATM
networks. Although TES models can be easily implemented
for simulation studies, the need still exists for {\em
analytical\/} results on the performance of queueing
systems driven by autocorrelated traffic. Of particular
interest are the tails of the waiting time distribution
in queues driven by TES-modeled bursty traffic. Such
tail probabilities, when they become exceedingly small,
may be difficult to obtain via conventional simulation.
In order to extend existing results, based on Large
Deviations theory, to TES processes, the main
difficulty is posed by the continuous state-space of
the TES time-series. In this paper, we develop a
general result concerning exponential bounds for the
waiting time under {\em continuous state-space\/}
Markov arrivals. We apply this result to {\em TES/GI\/}
/1 queues, show numerical examples, and compare our
bound with simulation results. Accurate estimates of
extremely low probabilities are obtained by employing
fast simulation techniques based on {\em importance
sampling.\/}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:1995:OPA,
author = "S. C. Borst",
title = "Optimal probabilistic allocation of customer types to
servers",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "116--125",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223601",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The model under consideration consists of $n$ customer
types attended by $m$ parallel non-identical servers.
Customers are allocated to the servers in a
probabilistic manner; upon arrival customers are sent
to one of the servers according to an $ m \times n$
matrix of routing probabilities. We consider the
problem of finding an allocation that minimizes a
weighted sum of the mean waiting times. We expose the
structure of an optimal allocation and describe for
some special cases in detail how the structure may be
exploited in actually determining an optimal
allocation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Matta:1995:ZIS,
author = "Ibrahim Matta and A. Udaya Shankar",
title = "{Z}-iteration: a simple method for throughput
estimation in time-dependent multi-class systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "126--135",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223602",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiple-class multiple-resource (MCMR) systems, where
each class of customers requires a particular set of
resources, are common. These systems are often analyzed
under steady-state conditions. We describe a simple
method, referred to as {\em Z-iteration}, to estimate
both transient and steady-state performances of such
systems. The method makes use of results and techniques
available from queueing theory, network analysis,
dynamic flow theory, and numerical analysis. We show
the generality of the Z-iteration by applying it to an
ATM network, a parallel disk system, and a distributed
batch system. Validations against discrete-event
simulations show the accuracy and computational
advantages of the Z-iteration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1995:SRL,
author = "Peter M. Chen and Edward K. Lee",
title = "Striping in a {RAID} level 5 disk array",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "136--145",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223603",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Redundant disk arrays are an increasingly popular way
to improve I/O system performance. Past research has
studied how to stripe data in non-redundant (RAID Level
0) disk arrays, but none has yet been done on how to
stripe data in redundant disk arrays such as RAID Level
5, or on how the choice of striping unit varies with
the number of disks. Using synthetic workloads, we
derive simple design rules for striping data in RAID
Level 5 disk arrays given varying amounts of workload
information. We then validate the synthetically-derived
design rules using real workload traces to show that
the design rules apply well to real systems. We find no
difference in the optimal striping units for RAID Level
0 and 5 for read-intensive workloads. For
write-intensive workloads, in contrast, the overhead of
maintaining parity causes full-stripe writes (writes
that span the entire error-correction group) to be more
efficient than read-modify writes or reconstruct
writes. This additional factor causes the optimal
striping unit for RAID Level 5 to be four times smaller
for write-intensive workloads than for read-intensive
workloads. We next investigate how the optimal striping
unit varies with the number of disks in an array. We
find that the optimal striping unit for reads in a RAID
Level 5 varies {\em inversely\/} to the number of
disks, but that the optimal striping unit for writes
varies {\em with\/} the number of disks. Overall, we
find that the optimal striping unit for workloads with
an unspecified mix of reads and writes is {\em
independent\/} of the number of disks. Together, these
trends lead us to recommend (in the absence of specific
workload information) that the striping unit over a
wide range of RAID Level 5 disk array sizes be equal to
1/2 * average positioning time * disk transfer rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Worthington:1995:LES,
author = "Bruce L. Worthington and Gregory R. Ganger and Yale N.
Patt and John Wilkes",
title = "On-line extraction of {SCSI} disk drive parameters",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "146--156",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223604",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sophisticated disk scheduling algorithms require
accurate, detailed disk drive specifications, including
data about mechanical delays, on-board caching and
prefetching algorithms, command and protocol overheads,
and logical-to-physical block mappings. Comprehensive
disk models used in storage subsystem design require
similar levels of detail. We describe a suite of
general-purpose algorithms and techniques for acquiring
the necessary information from a SCSI disk drive. Using
only the ANSI-standard interface, we demonstrate how
the important parameter values of a modern SCSI drive
can be determined accurately and efficiently.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolf:1995:DDD,
author = "Joel L. Wolf and Philip S. Yu and Hadas Shachnai",
title = "{DASD} dancing: a disk load balancing optimization
scheme for video-on-demand computer systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "157--166",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223605",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For a video-on-demand computer system we propose a
scheme which balances the load on the disks, thereby
helping to solve a performance problem crucial to
achieving maximal video throughput. Our load balancing
scheme consists of two stages. The static stage
determines good assignments of videos to groups of
striped disks. The dynamic phase uses these
assignments, and features a DASD dancing algorithm
which performs real-time disk scheduling in an
effective manner. Our scheme works synergistically with
disk striping. We examine the performance of the DASD
dancing algorithm via simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sandhu:1995:ASD,
author = "Harjinder S. Sandhu and Kenneth C. Sevcik",
title = "An analytic study of dynamic hardware and software
cache coherence strategies",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "167--177",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223606",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic software cache coherence strategies use
information about program sharing behaviour to manage
caches at run-time and at a granularity defined by the
application. The program-level information is obtained
through annotations placed into the application by the
user or the compiler. The coherence protocols may range
from simple static algorithms to dynamic algorithms
that use run-time data structures similar to the
directories used in hardware strategies. In this paper,
we present an analytic study of five dynamic software
cache coherence algorithms and compare these to a
representative hardware coherence strategy. The
analytic model is constructed using four input
parameters --- write probability, locality,
granularity, and system size --- and solved by analysis
of a Markov chain. We show that the fundamental
tradeoffs between the different hardware and software
strategies are captured in this model. The results of
the study show that hardware schemes perform better for
fine-grained data structures for much of the parameter
space that we study. However, for coarse-grained data
structures, various software algorithms are dominant
over most of the parameter space. Further, hardware
strategies are found to be more susceptible to the
effects of contention, and also perform worse for the
asymmetric workload that we study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brorsson:1995:SPT,
author = "Mats Brorsson",
title = "{SM-prof}: a tool to visualise and find cache
coherence performance bottlenecks in multiprocessor
programs",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "178--187",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223607",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache misses due to coherence actions are often the
major source for performance degradation in cache
coherent multiprocessors. It is often difficult for the
programmer to take cache coherence into account when
writing the program since the resulting access pattern
is not apparent until the program is executed. SM-prof
is a performance analysis tool that addresses this
problem by visualising the shared data access pattern
in a diagram with links to the source code lines
causing performance degrading access patterns. The
execution of a program is divided into time slots and
each data block is classified based on the accesses
made to the block during a time slot. This enables the
programmer to follow the execution over time and it is
possible to track the exact position responsible for
accesses causing many cache misses related to coherence
actions. Matrix multiplication and the MP3D application
from SPLASH are used to illustrate the use of SM-prof.
For MP3D, SM-prof revealed performance limitations that
resulted in a performance improvement of over 75\%. The
current implementation is based on program-driven
simulation in order to achieve non-intrusive profiling.
If a small perturbation of the program execution is
acceptable, it is also possible to use software tracing
techniques given that a data address can be related to
the originating instruction.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:1995:SIP,
author = "Pei Cao and Edward W. Felten and Anna R. Karlin and
Kai Li",
title = "A study of integrated prefetching and caching
strategies",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "188--197",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223608",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Prefetching and caching are effective techniques for
improving the performance of file systems, but they
have not been studied in an integrated fashion. This
paper proposes four properties that optimal integrated
strategies for prefetching and caching must satisfy,
and then presents and studies two such integrated
strategies, called {\em aggressive\/} and {\em
conservative.\/} We prove that the performance of the
{\em conservative\/} approach is within a factor of two
of optimal and that the performance of the {\em
aggressive\/} strategy is a factor significantly less
than twice that of the optimal case. We have evaluated
these two approaches by trace-driven simulation with a
collection of file access traces. Our results show that
the two integrated prefetching and caching strategies
are indeed close to optimal and that these strategies
can reduce the running time of applications by up to
50\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sivasubramaniam:1995:CBR,
author = "Anand Sivasubramaniam and Aman Singla and Umakishore
Ramachandran and H. Venkateswaran",
title = "On characterizing bandwidth requirements of parallel
applications",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "198--207",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223609",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Synthesizing architectural requirements from an
application viewpoint can help in making important
architectural design decisions towards building large
scale parallel machines. In this paper, we quantify the
link bandwidth requirement on a binary hypercube
topology for a set of five parallel applications. We
use an execution-driven simulator called SPASM to
collect data points for system sizes that are feasible
to be simulated. These data points are then used in a
regression analysis for projecting the link bandwidth
requirements for larger systems. The requirements are
projected as a function of the following system
parameters: number of processors, CPU clock speed, and
problem size. These results are also used to project
the link bandwidths for other network topologies. Our
study quantifies the link bandwidth that has to be made
available to limit the network overhead in an
application to a specified tolerance level. The results
show that typical link bandwidths (200--300 MBytes/sec)
found in current commercial parallel architectures
(such as Intel Paragon and Cray T3D) would have fairly
low network overhead for the applications considered in
this study. For two of the applications, this overhead
is negligible. For the other applications, this
overhead can be limited to about 30\% of the execution
time provided the problem sizes are increased
commensurate with the processor clock speed. The
technique presented can be useful to a system architect
to synthesize the bandwidth requirements for realizing
well-balanced parallel architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McCann:1995:SMC,
author = "Cathy McCann and John Zahorjan",
title = "Scheduling memory constrained jobs on distributed
memory parallel computers",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "208--219",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223610",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of multiprocessor scheduling
of jobs whose memory requirements place lower bounds on
the fraction of the machine required in order to
execute. We address three primary questions in this
work: (1) How can a parallel machine be multiprogrammed
with minimal overhead when jobs have minimum memory
requirements? (2) To what extent does the inability of an
application to repartition its workload during runtime
affect the choice of processor allocation policy? (3) How
rigid should the system be in attempting to provide
equal resource allocation to each runnable job in order
to minimize average response time? This work is
applicable both to parallel machines and to networks of
workstations supporting parallel applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lebeck:1995:AMN,
author = "Alvin R. Lebeck and David A. Wood",
title = "Active memory: a new abstraction for memory-system
simulation",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "220--230",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223611",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes the {\em active memory\/}
abstraction for memory-system simulation. In this
abstraction---designed specifically for on-the-fly
simulation---memory references logically invoke a
user-specified function depending upon the reference's
type and accessed memory block state. Active memory
allows simulator writers to specify the appropriate
action on each reference, including `no action' for the
common case of cache hits. Because the abstraction
hides implementation details, implementations can be
carefully tuned for particular platforms, permitting
much more efficient on-the-fly simulation than the
traditional trace-driven abstraction. Our SPARC
implementation, {\em Fast-Cache}, executes simple data
cache simulations two or three times faster than a
highly-tuned trace-driven simulator and only 2 to 7
times slower than the original program. Fast-Cache
implements active memory by performing a fast table
look up of the memory block state, taking as few as 3
cycles on a SuperSPARC for the no-action case. Modeling
the effects of Fast-Cache's additional lookup
instructions qualitatively shows that Fast-Cache is
likely to be the most efficient simulator for miss
ratios between 3\% and 40\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:1995:CTD,
author = "Edmundo {de Souza e Silva} and H. Richard Gail and
Reinaldo {Vallejos Campos}",
title = "Calculating transient distributions of cumulative
reward",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "231--240",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223612",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov reward models have been employed to obtain
performability measures of computer and communication
systems. In these models, a continuous time Markov
chain is used to represent changes in the system
structure, usually caused by faults and repairs of its
components, and reward rates are assigned to states of
the model to indicate some measure of accomplishment at
each structure. A procedure to calculate numerically
the distribution of the reward accumulated over a
finite observation period is presented. The development
is based solely on probabilistic arguments, and the
final recursion is quite simple. The algorithm has a
low computational cost in terms of model parameters. In
fact, the number of operations is linear in a parameter
that is smaller than the number of rewards, while the
storage required is independent of the number of
rewards. We also consider the calculation of the
distribution of cumulative reward for models in which
impulse based rewards are associated with
transitions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carrasco:1995:RRT,
author = "Juan A. Carrasco and Angel Calder{\'o}n",
title = "Regenerative randomization: theory and application
examples",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "241--252",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223613",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Randomization is a popular method for the transient
solution of continuous-time Markov models. Its primary
advantages over other methods (i.e., ODE solvers) are
robustness and ease of implementation. It is however
well-known that the performance of the method
deteriorates with the `stiffness' of the model: the
number of required steps to solve the model up to time
$t$ tends to $ \Lambda t $ for $ \Lambda t
\rightarrow \infty $. In this paper we present a new
method called regenerative randomization and apply it
to the computation of two transient measures for
rewarded irreducible Markov models. Regarding the
number of steps required in regenerative randomization
we prove that: (1) it is smaller than the number of
steps required in standard randomization when the
initial distribution is concentrated in a single state,
(2) for $ \Lambda t \rightarrow \infty $, it is upper
bounded by a function $ O(\log (\Lambda t /
\epsilon))$, where $ \epsilon $ is the desired relative
approximation error bound. Using dependability and
performability examples we analyze the performance of
the method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1995:CTA,
author = "Albert G. Greenberg and R. Srikant",
title = "Computational techniques for accurate performance
evaluation of multirate, multihop communication
networks",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "253--260",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223614",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computational techniques are presented for
connection-level performance evaluation of
communication networks, with stochastic multirate
traffic, state dependent admission control, alternate
routing, and general topology --- all characteristics
of emerging integrated service networks. The techniques
involve solutions of systems of fixed point equations,
which estimate equilibrium network behavior. Though
similar techniques have been applied with success to
single-rate fully connected networks, the curse of
dimensionality arises when the techniques are extended
to multirate, multihop networks, and the cost of
solving the fixed point equations exactly is
exponential. This exponential barrier is skirted by
exploiting, in particular, a close relationship with
the network reliability problem, and by borrowing
effective heuristics from the reliability domain. A
series of experiments are reported on, comparing the
estimates from the new techniques to the results of
discrete event simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ott:1995:IET,
author = "Teun Ott",
title = "The {Internet} in evolution, and {TCP} over {ATM}
(panel session)",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "261--262",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223615",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Trivedi:1995:NMP,
author = "Kishor S. Trivedi and A. Bobbio and G. Ciardo and R.
German and A. Puliafito and M. Telek",
title = "Non-{Markovian} {Petri} nets",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "263--264",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223616",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Non-Markovian models allow us to capture a very wide
range of circumstances in which it is necessary to
model phenomena whose times to occurrence is not
exponentially distributed. Events such as timeouts in a
protocol, service times at a machine performing the
same task on each part, and memory access or
instruction execution in a low-level h/w or s/w model,
have durations which are constant or with a very low
variance. Phase-type distributions can be used to
approximate a non-exponential, but they increase the
size of the state space. The analysis of stochastic
systems with non-exponential timing is of increasing
interest in the literature and requires the development
of suitable modeling tools. Recently, some effort has
been devoted to generalize the concept of {\em
Stochastic Petri Nets\/} (SPN), by allowing the firing
times to be generally distributed. A particular case of
non-Markovian {\em SPN}, is the class of {\em
Deterministic and SPN (DSPN)\/} [1]. A {\em DSPN\/} is
a non-Markovian {\em SPN\/} where, in each marking, at
most one transition is allowed to have a deterministic
firing time with enabling memory policy. A new class of
stochastic Petri nets has recently been defined [2, 3]
by generalizing the deterministic firing times of the
DSPN to generally distributed firing times. The
underlying stochastic process for these classes of
Petri nets is a {\em Markov Regenerative Process\/}
(MRGP). This observation has opened a very fertile line
of research aimed at the definition of solvable classes
of models whose underlying marking process is an MRGP,
and therefore referred to as {\em Markov Regenerative
Stochastic Petri Nets (MRSPN).\/} Some of the results
in this field will be described in the session. In
particular, Ciardo investigates stochastic confusion by
defining the selection probability for transitions
attempting to fire at the same time. German introduces
the `method of supplementary variables' for the
derivation of state equations describing the transient
behavior of the marking process. Puliafito describes
how, under some constraints, concurrent enabling of
several generally distributed timed transitions is
allowed. Bobbio and Telek discuss how age memory policy
can be included to capture preemptive mechanisms of the
resume {\em (prs)\/} type.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Erramilli:1995:PIS,
author = "Ashok Erramilli",
title = "Performance impacts of self-similarity in traffic",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "265--266",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223617",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent measurement studies in Bellcore and elsewhere
have convincingly established the presence of
statistical self similarity in high-speed network
traffic. What is less clear --- and as such the subject
of intense current research --- is the impact of the
self-similarity on network performance. Given that
traditional queueing models of network performance do
not model self-similarity, the validity of traditional
models to predict network performance would be
supported if it is shown that self-similarity does not
have measurable impacts on performance. On the other
hand, if the converse of this assertion were true, it
would have significant impacts on the way networks are
designed and analyzed, as well as open up new areas of
research in mathematical modeling, queueing analysis,
network design and control. The issues addressed in
this session are therefore of fundamental importance in
high-speed network research. Given that queueing
behavior is dominated by traffic characteristics over
the time scales of busy periods, it has been argued
that phenomena that span many time scales, such as
self-similarity, should not be relevant for queueing
performance. However, the paper by Narayan, Erramilli
and Willinger presents evidence that for data traffic,
the long range dependence (which is related to the
self-similarity in traffic) can dominate queueing
behavior under a variety of conditions. Specifically,
it is shown based on a series of carefully designed
simulation experiments with actual traffic traces, that
the queueing behavior with actual traces is
considerably heavier than that predicted by traditional
theory, and that these differences are attributable to
long range dependence. The paper by Heyman and Lakshman
investigates modeling of video traffic to predict cell
loss performance with finite buffer systems, and they
conclude that long-range dependence is not a crucial
property in determining the finite buffer behavior of
video conferences. In particular, a Markov chain model
that does not model long-range dependence is
nevertheless able to reproduce various operating
characteristics over a wide range of loadings obtained
with the actual video trace. Mukherjee, Adas, Klivansky
and Song investigate the performance impacts of
short-range and long-range correlation components using
simulations with a fractional ARIMA model. They also
discuss a strategy to provide quality of service
guarantees with long range dependent traffic, as well
as recent results on NSFNET traffic. Finally, the paper
by Li describes a frequency-domain based analytical
tool that matches a special class of Markov chains with
traces exhibiting a variety of characteristics,
including long-range dependence. Good agreement is
reported between analytical queueing solutions of the
matched Markov chains, and simulation results obtained
with video and data traffic traces. This session therefore
brings together a wide range of viewpoints on this
issue. Resolution of such seemingly conflicting
conclusions lies in the fact that in performance
analysis, answers sensitively depend on the specific
details of a problem. Thus the proper question to ask
is not whether or not self-similarity matters in
queueing; but under what conditions it matters.
Likewise, the question to ask is not whether a class of
models is invalid; but to identify the conditions under
which traditional Markov or self-similar traffic models
are expected to be valid. Finally, given an
understanding of statistical features that are relevant
to a given problem, the challenge is to model these
accurately and parsimoniously so that the model is
useful in practical performance analysis. The work
outlined in the abstracts below adds significantly to
our understanding of these issues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arpaci:1995:IPS,
author = "Remzi H. Arpaci and Andrea C. Dusseau and Amin M.
Vahdat and Lok T. Liu and Thomas E. Anderson and David
A. Patterson",
title = "The interaction of parallel and sequential workloads
on a network of workstations",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "267--278",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines the plausibility of using a
network of workstations (NOW) for a mixture of parallel
and sequential jobs. Through simulations, our study
examines issues that arise when combining these two
workloads on a single platform. Starting from a
dedicated NOW just for parallel programs, we
incrementally relax uniprogramming restrictions until
we have a multi-programmed, multi-user NOW for both
interactive sequential users and parallel programs. We
show that a number of issues associated with the
distributed NOW environment (e.g., daemon activity,
coscheduling skew) can have a small but noticeable
effect on parallel program performance. We also find
that efficient migration to idle workstations is
necessary to maintain acceptable parallel application
performance. Furthermore, we present a methodology for
deriving an optimal delay time for recruiting idle
machines for use by parallel programs; this {\em
recruitment threshold\/} was just 3 minutes for the
research cluster we measured. Finally, we quantify the
effects of the additional parallel load upon
interactive users by keeping track of the potential
number of {\em user delays\/} in our simulations. When
we limit the maximum number of delays per user, we can
still maintain acceptable parallel program performance.
In summary, we find that for our workloads a 2:1 rule
applies: a NOW cluster of approximately 60 machines can
sustain a 32-node parallel workload in addition to the
sequential load placed upon it by interactive users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Myllymaki:1995:DTJ,
author = "Jussi Myllymaki and Miron Livny",
title = "Disk-tape joins: synchronizing disk and tape access",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "279--290",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223619",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today large amounts of data are stored on tertiary
storage media such as magnetic tapes and optical disks.
DBMSs typically operate only on magnetic disks since
they know how to maneuver disks and how to optimize
accesses on them. Tertiary devices present a problem
for DBMSs since these devices have dismountable media
and have very different operational characteristics
compared to magnetic disks. For instance, most tape
drives offer very high capacity at low cost but are
accessed sequentially, involve lengthy latencies, and
deliver lower bandwidth. Typically, the scope of a
DBMS's query optimizer does not include tertiary
devices, and the DBMS might not even know how to
control and operate upon tertiary-resident data. In a
three-level hierarchy of storage devices (main memory,
disk, tape), the typical solution is to elevate
tape-resident data to disk devices, thus bringing such
data into the DBMS' control, and then to perform the
required operations on disk. This requires additional
space on disk and may not give the lowest response time
possible. With this challenge in mind, we studied the
trade-offs between memory and disk requirements and the
execution time of a join with the help of two
well-known join methods. The conventional, disk-based
Nested Block Join and Hybrid Hash Join were modified to
operate directly on tapes. An experimental
implementation of the modified algorithms gave us more
insight into how the algorithms perform in practice.
Our performance analysis shows that a DBMS desiring to
operate on tertiary storage will benefit from special
algorithms that operate directly on tape-resident data
and take into account and exploit the mismatch in disk
and tape characteristics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "concurrent I/O; join methods; tertiary storage",
}
@Article{Phalke:1995:IRG,
author = "Vidyadhar Phalke and Bhaskarpillai Gopinath",
title = "An inter-reference gap model for temporal locality in
program behavior",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "291--300",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223620",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The property of locality in program behavior has been
studied and modelled extensively because of its
application to memory design, code optimization,
multiprogramming etc. We propose a $k$ order Markov
chain based scheme to model the sequence of time
intervals between successive references to the same
address in memory during program execution. Each unique
address in a program is modelled separately. To
validate our model, which we call the Inter-Reference
Gap (IRG) model, we show substantial improvements in
three different areas where it is applied. (1) We
improve upon the miss ratio for the Least Recently Used
(LRU) memory replacement algorithm by up to 37\%. (2)
We achieve up to 22\% space-time product improvement
over the Working Set (WS) algorithm for dynamic memory
management. (3) A new trace compression technique is
proposed which compresses up to 2.5\% with zero error
in WS simulations and up to 3.7\% error in the LRU
simulations. All these results are obtained
experimentally, via trace driven simulations over a
wide range of cache traces, page reference traces,
object traces and database traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic memory management; locality of reference;
Markov chains; memory replacement; prediction; trace
compaction; trace driven simulation",
}
@Article{Braams:1995:BCP,
author = "Jan Braams",
title = "Batch class process scheduler for {Unix SVR4}",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "301--302",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223621",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Donatelli:1995:SSR,
author = "S. Donatelli and G. Franceschinis",
title = "State space reductions using stochastic well-formed
net simplifications: an application to random polling
systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "303--304",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balsamo:1995:ART,
author = "S. Balsamo and I. Mura",
title = "Approximate response time distribution in {Fork} and
{Join} systems",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "305--306",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223623",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:1995:SEA,
author = "Xiaodong Zhang and Zhichen Xu",
title = "A semi-empirical approach to scalability study",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "307--308",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223624",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hughes:1995:PFP,
author = "Eric Hughes and Marianne Winslett",
title = "{PEDCAD}: a framework for performance evaluation of
object database applications",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "309--310",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223625",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salehi:1995:SCA,
author = "James D. Salehi and James F. Kurose and Don Towsley",
title = "Scheduling for cache affinity in parallelized
communication protocols",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "311--312",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223626",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We explore processor-cache affinity scheduling of
parallel network protocol processing in a setting in
which protocol processing executes on a shared-memory
multiprocessor concurrently with a general workload of
non-protocol activity. We find that affinity scheduling
can significantly reduce the communication delay
associated with protocol processing, enabling the host
to support a greater number of concurrent streams and
to provide a higher maximum throughput to individual
streams. In addition, we compare implementations of two
parallelization approaches ({\em Locking\/} and {\em
Independent Protocol Stacks\/}) with very different
caching behaviors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chatterjee:1995:MAM,
author = "Amit K. Chatterjee and Vijay K. Konangi",
title = "Modeling and analysis of multi channel asymmetric
packet switch modules in a bursty and nonuniform
traffic environment",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "313--314",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223627",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:1995:TNT,
author = "Gautam Shah and Umakishore Ramachandran and Richard
Fujimoto",
title = "{Timepatch}: a novel technique for the parallel
simulation of multiprocessor caches",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "315--316",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223628",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sundaram:1995:FAB,
author = "C. R. M. Sundaram and Derek L. Eager",
title = "Future applicability of bus-based shared memory
multiprocessors",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "317--318",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223629",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:1995:MFC,
author = "Gianfranco Ciardo and Ludmila Cherkasova and Vadim
Kotov and Tomas Rokicki",
title = "Modeling a {Fibre Channel} switch with stochastic
{Petri} nets",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "319--320",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223586.223630",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arunachalam:1995:PPP,
author = "Meenakshi Arunachalam and Alok Choudhary",
title = "A prefetching prototype for the parallel file systems
on the {Paragon}",
journal = j-SIGMETRICS,
volume = "23",
number = "1",
pages = "321--322",
month = may,
year = "1995",
CODEN = "????",
DOI = "https://doi.org/10.1145/223587.223631",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:18:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gopalakrishnan:1996:BRT,
author = "R. Gopalakrishnan and Gurudatta M. Parulkar",
title = "Bringing real-time scheduling theory and practice
closer for multimedia computing",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "1--12",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233017",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper seeks to bridge the gap between theory and
practice of real-time scheduling in the domain of high
speed multimedia networking. We show that the strict
preemptive nature of real-time scheduling leads to more
context switching, and requires system calls for
concurrency control. We present our scheduling scheme
called rate-monotonic with delayed preemption (rmdp)
and show how it reduces both these overheads. We
then develop the analytical framework to analyze rmdp
and other scheduling schemes that lie in the region
between strict (immediate) preemption and no
preemption. Our {\em idealized scheduler simulation\/}
methodology accounts for the blocking introduced by
these schemes under the usual assumption that the time
for context switching and preemption is zero. We derive
simpler schedulability tests for non-preemptive
scheduling, and prove a variant of rate-monotonic
scheduling that has fewer preemptions. Our measurements
on Sparc and Pentium platforms show that for the
workloads we considered, rmdp increases useful
utilization by as much as 8\%. Thus our scheduling
policies have the potential to improve performance over
existing methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:1996:EPL,
author = "Mor Harchol-Balter and Allen B. Downey",
title = "Exploiting process lifetime distributions for dynamic
load balancing",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "13--24",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233019",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We measure the distribution of lifetimes for UNIX
processes and propose a functional form that fits this
distribution well. We use this functional form to
derive a policy for preemptive migration, and then use
a trace-driven simulator to compare our proposed policy
with other preemptive migration policies, and with a
non-preemptive load balancing strategy. We find that,
contrary to previous reports, the performance benefits
of preemptive migration are significantly greater than
those of non-preemptive migration, even when the
memory-transfer cost is high. Using a model of
migration costs representative of current systems, we
find that preemptive migration reduces the mean delay
(queueing and migration) by 35--50\%, compared to
non-preemptive migration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dusseau:1996:EDS,
author = "Andrea C. Dusseau and Remzi H. Arpaci and David E.
Culler",
title = "Effective distributed scheduling of parallel
workloads",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "25--36",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233020",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a distributed algorithm for time-sharing
parallel workloads that is competitive with
coscheduling. {\em Implicit scheduling\/} allows each
local scheduler in the system to make independent
decisions that dynamically coordinate the scheduling of
cooperating processes across processors. Of particular
importance is the blocking algorithm which decides the
action of a process waiting for a communication or
synchronization event to complete. Through simulation
of bulk-synchronous parallel applications, we find that
a simple two-phase fixed-spin blocking algorithm
performs well; a two-phase adaptive algorithm that
gathers run-time data on barrier wait-times performs
slightly better. Our results hold for a range of
machine parameters and parallel program
characteristics. These findings are in direct contrast
to the literature that states explicit coscheduling is
necessary for fine-grained programs. We show that the
choice of the local scheduler is crucial, with a
priority-based scheduler performing two to three times
better than a round-robin scheduler. Overall, we find
that the performance of implicit scheduling is near
that of coscheduling ($\pm 35\%$), without the
requirement of explicit, global coordination.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lim:1996:LPB,
author = "Beng-Hong Lim and Ricardo Bianchini",
title = "Limits on the performance benefits of multithreading
and prefetching",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "37--46",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents new analytical models of the
performance benefits of multithreading and prefetching,
and experimental measurements of parallel applications
on the MIT Alewife multiprocessor. For the first time,
both techniques are evaluated on a real machine as
opposed to simulations. The models determine the region
in the parameter space where the techniques are most
effective, while the measurements determine the region
where the applications lie. We find that these regions
do not always overlap significantly. The multithreading
model shows that only 2--4 contexts are necessary to
maximize this technique's potential benefit in current
multiprocessors. Multithreading improves execution time
by less than 10\% for most of the applications that we
examined. The model also shows that multithreading can
significantly improve the performance of the same
applications in multiprocessors with longer latencies.
Reducing context-switch overhead is not crucial. The
software prefetching model shows that allowing 4
outstanding prefetches is sufficient to achieve most of
this technique's potential benefit on current
multiprocessors. Prefetching improves performance over
a wide range of parameters, and improves execution time
by as much as 20--50\% even on current multiprocessors.
The two models show that prefetching has a significant
advantage over multithreading for machines with low
memory latencies and/or applications with high cache
miss rates because a prefetch instruction consumes less
time than a context-switch.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dinda:1996:FMA,
author = "Peter A. Dinda and David R. O'Hallaron",
title = "Fast message assembly using compact address
relations",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "47--56",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233022",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Message assembly and disassembly represent a
significant fraction of total communication time in
many parallel systems. We introduce a run-time approach
for fast message assembly and disassembly. The approach
is based on generating addresses by decoding a
precomputed and compactly stored address relation that
describes the mapping of addresses on the source node
to addresses on the destination node. The main result
is that relations induced by redistributions of regular
block-cyclic distributed arrays can be encoded in an
extremely compact form that facilitates high throughput
message assembly and disassembly. We measure the
throughput of decoding-based message assembly and
disassembly on several systems and find performance on
par with copy throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Parsons:1996:CAM,
author = "Eric W. Parsons and Kenneth C. Sevcik",
title = "Coordinated allocation of memory and processors in
multiprocessors",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "57--67",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An important issue in multiprogrammed multiprocessor
systems is the scheduling of parallel jobs. Most
research in the area has focussed solely on the
allocation of processors to jobs. However, since memory
is also a critical resource for many parallel jobs, the
allocation of memory and processors must be coordinated
to allow the system to operate most effectively. To
understand how to design such coordinated scheduling
disciplines, it is important to have a theoretical
foundation. To this end, we develop bounds on the
achievable system throughput when both memory and
processing time are in demand. We then propose and
simulate a simple discipline and relate its performance
to the throughput bounds. An important result of our
work is for the situation in which the workload speedup
is convex (from above), but the speedup characteristics
of individual jobs are unknown. It shows that an
equi-allocation strategy for processors can achieve
near-maximum throughput, yet offer good mean response
times, when both memory and processors are
considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Witchel:1996:EFF,
author = "Emmett Witchel and Mendel Rosenblum",
title = "{Embra}: fast and flexible machine simulation",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "68--79",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233025",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes Embra, a simulator for the
processors, caches, and memory systems of uniprocessors
and cache-coherent multiprocessors. When running as
part of the SimOS simulation environment, Embra models
the processors of a MIPS R3000/R4000 machine faithfully
enough to run a commercial operating system and
arbitrary user applications. To achieve high simulation
speed, Embra uses dynamic binary translation to
generate code sequences which simulate the workload. It
is the first machine simulator to use this technique.
Embra can simulate real workloads such as multiprocess
compiles and the SPEC92 benchmarks running on Silicon
Graphics' IRIX 5.3 at speeds only 3 to 9 times slower
than native execution of the workload, making Embra the
fastest reported complete machine simulator. Dynamic
binary translation also gives Embra the flexibility to
dynamically control both the simulation statistics
reported and the simulation model accuracy with low
performance overheads. For example, Embra can customize
its generated code to include a processor cache model
which allows it to compute the cache misses and memory
stall time of a workload. Customized code generation
allows Embra to simulate a machine with caches at
slowdowns of only a factor of 7 to 20. Most of the
statistics generated at this speed match those produced
by a slower reference simulator to within 1\%. This
paper describes the techniques used by Embra to achieve
high performance, focusing on the requirements unique
to machine simulation, including modeling the
processor, memory management unit, and caches. In order
to study Embra's memory system performance we use the
SimOS simulation system to examine Embra itself. We
present a detailed breakdown of Embra's memory system
performance for two cache hierarchies to understand
Embra's current performance and to show that Embra's
implementation techniques benefit significantly from
the larger cache hierarchies that are becoming
available. Embra has been used for operating system
development and testing as well as for studies of
computer architecture. In this capacity it has
simulated large, commercial workloads including IRIX
running a relational database system and a CAD system
for billions of simulated machine cycles.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "virtual machine",
}
@Article{Brakmo:1996:ENS,
author = "Lawrence S. Brakmo and Larry L. Peterson",
title = "Experiences with network simulation",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "80--90",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simulation is a critical tool in developing, testing,
and evaluating network protocols and architectures.
This paper describes $x$-Sim, a network simulator based
on the $x$-kernel, that is able to fully simulate the
topologies and traffic patterns of large scale
networks. It also illustrates the capabilities and
usefulness of the simulator with case studies. Finally,
based on our experiences using $x$-Sim, we identify a
set of principles (guidelines) for network simulation,
and present concrete examples that quantify the value
of these principles, along with the cost of ignoring
them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Greenberg:1996:AUL,
author = "Albert G. Greenberg and S. Shenker and Alexander L.
Stolyar",
title = "Asynchronous updates in large parallel systems",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "91--103",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Lubachevsky [5] introduced a new parallel simulation
technique intended for systems with limited
interactions between their many components or sites.
Each site has a local simulation time, and the states
of the sites are updated asynchronously. This
asynchronous updating appears to allow the simulation
to achieve a high degree of parallelism, with very low
overhead in processor synchronization. The key issue
for this asynchronous updating technique is: how fast
do the local times make progress in the large system
limit? We show that in a simple $K$-random interaction
model the local times progress at a rate $ 1 / (K +
1)$. More importantly, we find that the asymptotic
distribution of local times is described by a {\em
traveling wave\/} solution with exponentially decaying
tails. In terms of the parallel simulation, though the
interactions are local, a very high degree of global
synchronization results, and this synchronization is
succinctly described by the traveling wave solution.
Moreover, we report on experiments that suggest that
the traveling wave solution is {\em universal\/}; i.e.,
it holds in realistic scenarios (out of reach of our
analysis) where interactions among sites are not
random.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stiliadis:1996:DAF,
author = "Dimitrios Stiliadis and Anujan Varma",
title = "Design and analysis of frame-based fair queueing: a
new traffic scheduling algorithm for packet-switched
networks",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "104--115",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we introduce and analyze {\em
frame-based fair queueing}, a novel traffic scheduling
algorithm for packet-switched networks. The algorithm
provides end-to-end delay bounds identical to those of
PGPS (packet-level generalized processor sharing),
without the complexity of simulating the fluid-model
system in the background as required in PGPS. The
algorithm is therefore ideally suited for
implementation in packet switches supporting a large
number of sessions. We present a simple implementation
of the algorithm for a general packet switch. In
addition, we prove that the algorithm is fair in the
sense that sessions are not penalized for excess
bandwidth they received while other sessions were idle.
Frame-based fair queueing belongs to a general class of
scheduling algorithms, which we call {\em
Rate-Proportional Servers}. This class of algorithms
provides the same end-to-end delay and burstiness
bounds as PGPS, but allows more flexibility in the
design and implementation of the algorithm. We provide
a systematic analysis of this class of schedulers and
obtain bounds on their fairness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yates:1996:NSL,
author = "David J. Yates and Erich M. Nahum and James F. Kurose
and Don Towsley",
title = "Networking support for large scale multiprocessor
servers",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "116--125",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the next several years the performance demands on
globally available information servers are expected to
increase dramatically. These servers must be capable of
sending and receiving data over hundreds or even
thousands of simultaneous connections. In this paper,
we show that connection-level parallel protocols (where
different connections are processed in parallel)
running on a shared-memory multiprocessor can deliver
high network bandwidth across a large number of
connections. We experimentally evaluate
connection-level parallel implementations of both
TCP/IP and UDP/IP protocol stacks. We focus on three
questions in our performance evaluation: how throughput
scales with the number of processors, how throughput
changes as the number of connections increases, and how
fairly the aggregate bandwidth is distributed across
connections. We show how several factors impact
performance: the number of processors used, the number
of threads in the system, the number of connections
assigned to each thread, and the type of protocols in
the stack (i.e., TCP versus UDP). Our results show that
with careful implementation connection-level parallel
protocol stacks scale well with the number of
processors, and deliver high throughput which is, for
the most part, sustained as the number of connections
increases. Maximizing the number of threads in the
system yields the best overall throughput. However, the
best fairness behavior is achieved by matching the
number of threads to the number of processors and
scheduling connections assigned to threads in a
round-robin manner.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:1996:WSW,
author = "Martin F. Arlitt and Carey L. Williamson",
title = "{Web} server workload characterization: the search for
invariants",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "126--137",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The phenomenal growth in popularity of the World Wide
Web (WWW, or the Web) has made WWW traffic the largest
contributor to packet and byte traffic on the NSFNET
backbone. This growth has triggered recent research
aimed at reducing the volume of network traffic
produced by Web clients and servers, by using caching,
and reducing the latency for WWW users, by using
improved protocols for Web interaction. Fundamental to
the goal of improving WWW performance is an
understanding of WWW workloads. This paper presents a
workload characterization study for Internet Web
servers. Six different data sets are used in this
study: three from academic (i.e., university)
environments, two from scientific research
organizations, and one from a commercial Internet
provider. These data sets represent three different
orders of magnitude in server activity, and two
different orders of magnitude in time duration, ranging
from one week of activity to one year of activity.
Throughout the study, emphasis is placed on finding
workload {\em invariants\/}: observations that apply
across all the data sets studied. Ten invariants are
identified. These invariants are deemed important since
they (potentially) represent universal truths for all
Internet Web servers. The paper concludes with a
discussion of caching and performance issues, using the
invariants to suggest performance enhancements that
seem most promising for Internet Web servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martonosi:1996:IPM,
author = "Margaret Martonosi and David Ofelt and Mark Heinrich",
title = "Integrating performance monitoring and communication
in parallel computers",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "138--147",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A large and increasing gap exists between processor
and memory speeds in scalable cache-coherent
multiprocessors. To cope with this situation,
programmers and compiler writers must increasingly be
aware of the memory hierarchy as they implement
software. Tools to support memory performance tuning
have, however, been hobbled by the fact that it is
difficult to observe the caching behavior of a running
program. Little hardware support exists specifically
for observing caching behavior; furthermore, what
support does exist is often difficult to use for making
fine-grained observations about program memory
behavior. Our work observes that in a multiprocessor,
the actions required for memory performance monitoring
are similar to those required for enforcing cache
coherence. In fact, we argue that on several machines,
the coherence/communication system itself can be used
as machine support for performance monitoring. We have
demonstrated this idea by implementing the FlashPoint
memory performance monitoring tool. FlashPoint is
implemented as a special performance-monitoring
coherence protocol for the Stanford FLASH
Multiprocessor. By embedding performance monitoring
into a cache-coherence scheme based on a programmable
controller, we can gather detailed, per-data-structure,
memory statistics with less than a 10\% slowdown
compared to unmonitored program executions. We present
results on the accuracy of the data collected, and on
how FlashPoint performance scales with the number of
processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnaswamy:1996:MAE,
author = "Umesh Krishnaswamy and Isaac D. Scherson",
title = "Micro-architecture evaluation using performance
vectors",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "148--159",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarking is a widely used approach to measure
computer performance. Current use of benchmarks only
provides running times to describe the performance of a
tested system. Glancing through these execution times
provides little or no information about system
strengths and weaknesses. A novel benchmarking
methodology is proposed to identify key performance
parameters; the methodology is based on measuring
performance vectors. A performance vector is a vector
of ratings that represents delivered performance of
primitive operations of a system. Measuring the
performance vector of a system in a typical user
workload can be a tough problem. We show how the
performance vector falls out of an equation consisting
of dynamic instruction counts and execution times of
benchmarks. We present a non-linear approach for
computing the performance vector. The efficacy of the
methodology is ascertained by evaluating the
micro-architecture of the Sun SuperSPARC superscalar
processor using SPEC benchmarks. Results show
interesting tradeoffs in the SuperSPARC and speak
favorably of our methodology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crovella:1996:SSW,
author = "Mark E. Crovella and Azer Bestavros",
title = "Self-similarity in {World Wide Web} traffic: evidence
and possible causes",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "160--169",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently the notion of {\em self-similarity\/} has
been shown to apply to wide-area and local-area network
traffic. In this paper we examine the mechanisms that
give rise to the self-similarity of network traffic. We
present a hypothesized explanation for the possible
self-similarity of traffic by using a particular subset
of wide area traffic: traffic due to the World Wide Web
(WWW). Using an extensive set of traces of actual user
executions of NCSA Mosaic, reflecting over half a
million requests for WWW documents, we examine the
dependence structure of WWW traffic. While our
measurements are not conclusive, we show evidence that
WWW traffic exhibits behavior that is consistent with
self-similar traffic models. Then we show that the
self-similarity in such traffic can be explained based
on the underlying distributions of WWW document sizes,
the effects of caching and user preference in file
transfer, the effect of user `think time', and the
superimposition of many such transfers in a local area
network. To do this we rely on empirically measured
distributions both from our traces and from data
independently collected at over thirty WWW sites.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hillyer:1996:MPC,
author = "Bruce K. Hillyer and Avi Silberschatz",
title = "On the modeling and performance characteristics of a
serpentine tape drive",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "170--179",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New applications require online access to many
terabytes of data, but a magnetic disk storage system
this large requires thousands of drives. Magnetic tape
would be a good alternative, except that the application
demand for transparent data retrieval is not met by
current tape systems because of their high access
latency. This latency can be significantly improved by
good retrieval scheduling. A fundamental prerequisite
to efficient scheduling is the ability to estimate the
amount of time required for tape positioning operations
(the {\em locate time\/}). For serpentine tape, which
is the most common mass storage tape technology, this
estimation is subtle and complex. The main contribution
of this paper is a locate-time model for a DLT4000 tape
drive. The accuracy of the model is evaluated by
measurements, and the utility of the model is
demonstrated through a model-driven simulation of
retrieval scheduling, validated by measurements and
sensitivity testing. In brief, the locate-time model is
accurate to within a few percent, which enables the
production of efficient schedules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:1996:AMH,
author = "Daniel A. Menasc{\'e} and Odysseas I. Pentakalos and
Yelena Yesha",
title = "An analytic model of hierarchical mass storage systems
with network-attached storage devices",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "180--189",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network attached storage devices improve I/O
performance by separating control and data paths and
eliminating host intervention during data transfer.
Devices are attached to a high speed network for data
transfer and to a slower network for control messages.
Hierarchical mass storage systems use disks to cache
the most recently used files and tapes (robotic and
manually mounted) to store the bulk of the files in the
file system. This paper shows how queuing network
models can be used to assess the performance of
hierarchical mass storage systems that use network
attached storage devices. The analytic model validated
through simulation was used to analyze many different
scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:1996:AAW,
author = "Ken Chen and Laurent Decreusefond",
title = "An approximate analysis of waiting time in multi-class
{M/G/1/./EDF} queues",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "190--199",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Earliest-Deadline-First (EDF) queueing discipline
is being more and more widely used for handling
time-sensitive applications in computer systems and
networks. In this paper, we consider an arbitrary
number of traffic classes with class-specific
soft-deadline. A soft-deadline is a target waiting-time
limit that can be missed. EDF queueing has been proved
to minimize the maximum delay overflow related to this
limit. We propose a quantitative analysis, through the
metric of mean waiting time, on the behavior of EDF
queueing. This analysis gives also insight on the
correlation between traffic classes with different
time-constraints. Technically speaking, we have proven
that the mean waiting times for an arbitrary set of $N$
classes of traffic streams with soft deadlines are the
unique solution of a system of non-linear equations
under the constraint of Kleinrock's conservation
law. We then provide an $O(N^2)$ algorithm to get the
solution. Simulation suggests that the theoretical
approximation we made is quite acceptable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "communication networks; computer architecture;
multimedia systems; real-time systems; stochastic
modeling",
}
@Article{Aggarwal:1996:OPM,
author = "Charu Aggarwal and Joel Wolf and Philip S. Yu",
title = "On optimal piggyback merging policies for
video-on-demand systems",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "200--209",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A critical issue in the performance of a
video-on-demand system is the I/O bandwidth required in
order to satisfy client requests. A number of
techniques have been proposed in order to reduce these
bandwidth requirements. In this paper we concentrate on
one such technique, known as adaptive piggybacking. We
develop and analyze piggyback merging policies which
are optimal over large classes of reasonable methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gerber:1996:EDV,
author = "Richard Gerber and Ladan Gharai",
title = "Experiments with digital video playback",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "210--221",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe our experiments on digital
video applications, concentrating on the static and
dynamic tradeoffs involved in video playback. Our
results were extracted from a controlled series of 272
tests, which we ran in three stages. In the first stage
of 120 tests, we used a simple player-monitor tool to
evaluate the effects of various static parameters: {\em
compression type, frame size, digitized rate, spatial
quality\/} and {\em keyframe distribution.\/} The tests
were carried out on two Apple Macintosh platforms: at
the lower end a Quadra 950, and at the higher end, a
Power PC 7100/80. Our quantitative metrics included
average playback rate, as well as the rate's variance
over one-second intervals. The first set of experiments
unveiled several anomalous latencies. To track them
down we ran an additional 120 tests, from which we
concluded that the video and IO operations were
insufficiently tuned to each other. In the next step we
attempted to correct this problem, by implementing our
own video playback software and accompanying
device-level handlers. Our emphasis was on achieving a
controlled, deterministic coordination between the
various system components. An additional set of 32
experiments were carried out on our platforms, which
showed frame-rate increases of up to 325\%, with
associated reductions in rate variance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salehi:1996:SSV,
author = "James D. Salehi and Zhi-Li Zhang and James F. Kurose
and Don Towsley",
title = "Supporting stored video: reducing rate variability and
end-to-end resource requirements through optimal
smoothing",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "222--231",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "VBR compressed video is known to exhibit significant,
multiple-time-scale bit rate variability. In this
paper, we consider the transmission of stored video
from a server to a client across a high speed network,
and explore how the client buffer space can be used
most effectively toward reducing the variability of the
transmitted bit rate. We present two basic results.
First, we present an optimal smoothing algorithm for
achieving the {\em greatest possible reduction in rate
variability\/} when transmitting stored video to a
client with given buffer size. We provide a formal
proof of optimality, and demonstrate the performance of
the algorithm on a set of long MPEG-1 encoded video
traces. Second, we evaluate the impact of optimal
smoothing on the network resources needed for video
transport, under two network service models:
Deterministic Guaranteed service [1, 9] and
Renegotiated CBR (RCBR) service [8, 7]. Under both
models, we find the impact of optimal smoothing to be
dramatic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varki:1996:ABF,
author = "Elizabeth Varki and Lawrence W. Dowdy",
title = "Analysis of balanced fork-join queueing networks",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "232--241",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an analysis of closed, balanced,
fork-join queueing networks with exponential service
time distributions. The fork-join queue is mapped onto
two non-parallel networks, namely, a serial-join model
and a state-dependent model. Using these models, it is
proven that the proportion of the number of jobs in the
different subsystems of the fork-join queueing network
remains constant, irrespective of the multiprogramming
level. This property of balanced fork-join networks is
used to compute quick, inexpensive bounds for arbitrary
fork-join networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carrasco:1996:EEA,
author = "Juan A. Carrasco and Javier Escrib{\'a} and Angel
Calder{\'o}n",
title = "Efficient exploration of availability models guided by
failure distances",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "242--251",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233049",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, a method to bound the steady-state
availability using the failure distance concept has
been proposed. In this paper we refine that method by
introducing state space exploration techniques. In the
methods proposed here, the state space is incrementally
generated based on the contributions to the
steady-state availability band of the states in the
frontier of the currently generated state space.
Several state space exploration algorithms are
evaluated in terms of bounds quality and memory and CPU
time requirements. The more efficient seems to be a
waved algorithm which expands transition groups. We
compare our new methods with the method based on the
failure distance concept without state exploration and
a method proposed by Souza e Silva and Ochoa which uses
state space exploration but does not use the failure
distance concept. Using typical examples we show that
the methods proposed here can be significantly more
efficient than any of the previous methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garg:1996:MCT,
author = "Sachin Garg and Yennun Huang and Chandra Kintala and
Kishor S. Trivedi",
title = "Minimizing completion time of a program by
checkpointing and rejuvenation",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "252--261",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Checkpointing with rollback-recovery is a well known
technique to reduce the completion time of a program in
the presence of failures. While checkpointing is
corrective in nature, rejuvenation refers to preventive
maintenance of software aimed to reduce unexpected
failures mostly resulting from the `aging' phenomenon.
In this paper, we show how both these techniques may be
used together to further reduce the expected completion
time of a program. The idea of using checkpoints to
reduce the amount of rollback upon a failure is taken a
step further by combining it with rejuvenation. We
derive the equations for expected completion time of a
program with finite failure free running time for the
following three cases when; (a) neither checkpointing
nor rejuvenation is employed, (b) only checkpointing is
employed, and finally (c) both checkpointing and
rejuvenation are employed. We also present numerical
results for Weibull failure time distribution for the
above three cases and discuss optimal checkpointing and
rejuvenation that minimizes the expected completion
time. Using the numerical results, some interesting
conclusions are drawn about benefits of these
techniques in relation to the nature of failure
distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kimbrel:1996:IPP,
author = "Tracy Kimbrel and Pei Cao and Edward W. Felten and
Anna R. Karlin and Kai Li",
title = "Integrated parallel prefetching and caching",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "262--263",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leutenegger:1996:BME,
author = "Scott T. Leutenegger and Mario A. Lopez",
title = "A buffer model for evaluating {R}-tree performance",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "264--265",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:1996:ASM,
author = "Joseph L. Hellerstein",
title = "An approach to selecting metrics for detecting
performance problems in information systems",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "266--267",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233055",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Courtright:1996:RRP,
author = "William V. {Courtright II} and Garth Gibson and Mark
Holland and Jim Zelenka",
title = "{RAIDframe}: rapid prototyping for disk arrays",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "268--269",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramany:1996:QAR,
author = "Swaminathan Ramany and Derek Eager",
title = "Quantifying achievable routing performance in
multiprocessor interconnection networks",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "270--271",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233013.233059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hotovy:1996:AEW,
author = "Steven Hotovy and David Schneider and Timothy
O'Donnell",
title = "Analysis of the early workload on the {Cornell Theory
Center IBM SP2}",
journal = j-SIGMETRICS,
volume = "24",
number = "1",
pages = "272--273",
month = may,
year = "1996",
CODEN = "????",
DOI = "https://doi.org/10.1145/233008.233060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:21:30 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel computers have matured to the point where
they are capable of running a significant production
workload. Characterizing this workload, however, is far
more complicated than for the single-processor case.
Besides the varying number of processors that may be
invoked, the nodes themselves may provide differing
computational resources (memory size, for example). In
addition, the batch schedulers may introduce further
categories of service which must be considered in the
analysis. The Cornell Theory Center (CTC) put a
512-node IBM SP2 system into production in early 1995.
Extended traces of batch jobs began to be collected in
mid-1995 when the usage base became sufficiently large.
This paper offers an analysis of this early batch
workload.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Braun:1997:APL,
author = "Hans-Werner Braun",
title = "Architecture and performance of large internets, based
on terrestrial and satellite infrastructure",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "1--1",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258628",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balakrishnan:1997:ASW,
author = "Hari Balakrishnan and Mark Stemm and Srinivasan Seshan
and Randy H. Katz",
title = "Analyzing stability in wide-area network performance",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "2--12",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258631",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Internet is a very large scale, complex, dynamical
system that is hard to model and analyze. In this
paper, we develop and analyze statistical models for
the observed end-to-end network performance based on
extensive packet-level traces (consisting of
approximately 1.5 billion packets) collected from the
primary Web site for the Atlanta Summer Olympic Games
in 1996. We find that observed mean throughputs for
these transfers measured over 60 million complete
connections vary widely as a function of end-host
location and time of day, confirming that the Internet
is characterized by a large degree of heterogeneity.
Despite this heterogeneity, we find (using best-fit
linear regression techniques) that we can express the
throughput for Web transfers to most hosts as a random
variable with a log-normal distribution. Then, using
observed throughput as the control parameter, we
attempt to quantify the {\em spatial\/} (statistical
similarity across neighboring hosts) and {\em
temporal\/} (persistence over time) stability of
network performance. We find that Internet hosts that
are close to each other often have almost identically
distributed probability distributions of throughput. We
also find that throughputs to individual hosts often do
not change appreciably for several minutes. Overall,
these results indicate that there is promise in
protocol mechanisms that cache and share network
characteristics both within a single host and amongst
nearby hosts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Maltzahn:1997:PIE,
author = "Carlos Maltzahn and Kathy J. Richardson and Dirk
Grunwald",
title = "Performance issues of enterprise level {Web} proxies",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "13--23",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258668",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Enterprise level Web proxies relay World-Wide Web
traffic between private networks and the Internet. They
improve security, save network bandwidth, and reduce
network latency. While the performance of web proxies
has been analyzed based on synthetic workloads, little
is known about their performance on real workloads. In
this paper we present a study of two web proxies (CERN
and Squid) executing real workloads on Digital's Palo
Alto Gateway. We demonstrate that the simple CERN proxy
architecture outperforms all but the latest version of
Squid and continues to outperform cacheless
configurations. For the measured load levels the Squid
proxy used at least as many CPU, memory, and disk
resources as CERN, in some configurations significantly
more resources. At higher load levels the resource
utilization requirements will cross and Squid will be
the one using fewer resources. Lastly we found that
cache hit rates of around 30\% had very little effect
on the requests service time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heyman:1997:NMA,
author = "D. P. Heyman and T. V. Lakshman and Arnold L.
Neidhardt",
title = "A new method for analysing feedback-based protocols
with applications to engineering {Web} traffic over the
{Internet}",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "24--38",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258670",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most of the studies of feedback-based flow and
congestion control consider only persistent sources
which always have data to send. However, with the rapid
growth of Internet applications built on TCP/IP such as
the World Wide Web and the standardization of traffic
management schemes such as Available Bit Rate (ABR) in
Asynchronous Transfer Mode (ATM) networks, it is
essential to evaluate the performance of feedback-based
protocols using traffic models which are specific to
dominant applications. This paper presents a method for
analysing feedback-based protocols with a Web-user-like
input traffic where the source alternates between
`transfer' periods followed by `think' periods. Our key
results, which are presented for the TCP protocol,
are: (1) The goodputs and the fraction of time that the
system has some given number of transferring sources
are {\em insensitive\/} to the distributions of
transfer (file or page) sizes and think times except
through the ratio of their means. Thus, apart from
network round-trip times, only the ratio of average
transfer sizes and think times of users need be known
to size the network for achieving a specific quality of
service. (2) The Engset model can be adapted to
accurately compute goodputs for TCP and TCP over ATM,
with different buffer management schemes. Though only
these adaptations are given in the paper, the method
based on the Engset model can be applied to analyze
other feedback systems, such as ATM ABR, by finding a
protocol specific adaptation. Hence, the method we
develop is useful not only for analysing TCP using a
source model significantly different from the commonly
used persistent sources, but also can be useful for
analysing other feedback schemes. (3) Comparisons of
simulated TCP traffic to measured Ethernet traffic
shows qualitatively similar autocorrelation when think
times follow a Pareto distribution with infinite
variance. Also, the simulated and measured traffic have
long range dependence. In this sense our traffic model,
which purports to be Web-user-like, also agrees with
measured traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:1997:QME,
author = "Qingming Ma and K. K. Ramakrishnan",
title = "Queue management for explicit rate based congestion
control",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "39--51",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258672",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Rate based congestion control has been considered
desirable, both to deal with the high bandwidth-delay
products of today's high speed networks, and to match
the needs of emerging multimedia applications. Explicit
rate control achieves low loss because sources transmit
smoothly at a rate adjusted through feedback to be
within the capacity of the resources in the network.
However, large feedback delays, presence of higher
priority traffic, and varying transient situations make
it difficult to ensure {\em feasibility\/} (i.e., keep
the aggregate arrival rate below the bottleneck
resource's capacity) while also maintaining high
resource utilization. These conditions along with the
`fast start' desired by data applications often result
in substantial queue buildups. We describe a scheme
that manages the queue buildup at a switch even under
the most aggressive patterns of sources, in the context
of the Explicit Rate option for the Available Bit Rate
(ABR) congestion control scheme. A switch observes the
buildup of its queue, and uses it to reduce the portion
of the link capacity allocated to sources bottlenecked
at that link. We use the concept of a `virtual' queue,
which tracks the amount of queue that has been
`reduced', but has not yet taken effect at the switch.
We take advantage of the natural timing of `resource
management' (RM) cells transmitted by sources. The
scheme is elegant in that it is simple, and we show
that it reduces the queue buildup, in some cases, by
more than two orders of magnitude and the queue size
remains around a desired target. It maintains max-min
fairness even when the queue is being drained. The
scheme is scalable, and is as responsive as can be
expected: within the constraints of the feedback delay.
Finally, no changes are needed to the ATM Forum defined
source/destination policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ott:1997:TAA,
author = "Teunis J. Ott and Neil Aggarwal",
title = "{TCP} over {ATM}: {ABR} or {UBR}?",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "52--63",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258674",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on a simulation study of the
relative performances of the ATM ABR and UBR service
categories in transporting TCP/IP flows through an ATM
Network. The objective is two-fold: (i) to understand
the interaction between the window-based end-to-end
flowcontrol TCP and the rate based flowcontrol ABR
which is restricted to the ATM part of the network, and
(ii) to decide whether the greater complexity of ABR
(than UBR) pays off in better performance of ABR (than
UBR). The most important conclusion is that there does
not seem to be strong evidence that for TCP/IP
workloads the greater complexity of ABR pays off in
better performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kasera:1997:SRM,
author = "Sneha K. Kasera and Jim Kurose and Don Towsley",
title = "Scalable reliable multicast using multiple multicast
groups",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "64--74",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258676",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We examine an approach for providing reliable,
scalable multicast communication, using multiple
multicast groups for reducing receiver processing costs
in a multicast session. In this approach a single
multicast group is used for the original transmission
of packets. Retransmissions of packets are done to
separate multicast groups, which receivers dynamically
join or leave. We first show that by using an infinite
number of multicast groups, processing overhead at the
receivers are substantially reduced. Next, we show
that, for a specific negative acknowledgment
(NAK)-based protocol, most of this reduction can be
obtained by using only a small number of multicast
groups for a wide range of system parameters. Finally,
we present a local filtering scheme for minimizing
join/leave signaling when multiple multicast groups are
used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajamony:1997:PDS,
author = "Ramakrishnan Rajamony and Alan L. Cox",
title = "Performance debugging shared memory parallel programs
using run-time dependence analysis",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "75--87",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258678",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe a new approach to performance debugging
that focuses on automatically identifying computation
transformations to reduce synchronization and
communication. By grouping writes together into {\em
equivalence classes}, we are able to tractably collect
information from long-running programs. Our performance
debugger analyzes this information and suggests
computation transformations in terms of the source
code. We present the transformations suggested by the
debugger on a suite of four applications. For
Barnes--Hut and Shallow, implementing the debugger
suggestions improved the performance by a factor of
1.32 and 34 times respectively on an 8-processor IBM
SP2. For Ocean, our debugger identified excess
synchronization that did not have a significant impact
on performance. ILINK, a genetic linkage analysis
program widely used by geneticists, is already well
optimized. We use it only to demonstrate the
feasibility of our approach to long-running
applications. We also give details on how our approach
can be implemented. We use novel techniques to convert
control dependences to data dependences, and to compute
the source operands of stores. We report on the impact
of our instrumentation on the same application suite we
use for performance debugging. The instrumentation
slows down the execution by a factor of between 4 and
169 times. The log files produced during execution were
all less than 2.5 Mbytes in size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Herbordt:1997:PSC,
author = "Martin C. Herbordt and Owais Kidwai and Charles C.
Weems",
title = "Preprototyping {SIMD} coprocessors using virtual
machine emulation and trace compilation",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "88--99",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258679",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of massively parallel SIMD array architectures
is proliferating in the area of domain specific
coprocessors. Even so, they have undergone few
systematic empirical studies. The underlying problems
include the size of the architecture space, the lack of
portability of the test programs, and the inherent
complexity of simulating up to hundreds of thousands of
processing elements. We address the computational cost
problem with a novel approach to trace-based
simulation. Code is run on an abstract virtual machine
to generate a coarse-grained trace, which is then
refined through a series of transformations (a process
we call {\em trace compilation\/}) wherein greater
resolution is obtained with respect to the details of
the target machine. We have found this technique to be
one to two orders of magnitude faster than
instruction-level simulation while still retaining much
of the accuracy of the model. Furthermore, abstract
machine traces must be regenerated for only a small
fraction of the possible parameter combinations. Using
virtual machine emulation and trace compilation also
addresses program portability by allowing the user to
code in a single data parallel language with a single
compiler, regardless of the target architecture. This
technique has already been used to generate significant
results with respect to SIMD array architectures, a
sample of which are presented here.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tomkins:1997:IMP,
author = "Andrew Tomkins and R. Hugo Patterson and Garth
Gibson",
title = "Informed multi-process prefetching and caching",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "100--114",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258680",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Informed prefetching and caching based on application
disclosure of future I/O accesses (hints) can
dramatically reduce the execution time of I/O-intensive
applications. A recent study showed that, in the
context of a single hinting application, prefetching
and caching algorithms should adapt to the dynamic load
on the disks to obtain the best performance. In this
paper, we show how to incorporate adaptivity to disk
load into the TIP2 system, which uses {\em cost-benefit
analysis\/} to allocate global resources among multiple
processes. We compare the resulting system, which we
call TIPTOE (TIP with Temporal Overload Estimators) to
Cao et al.'s LRU-SP allocation scheme, also modified to
include adaptive prefetching. Using disk-accurate
trace-driven simulation we show that, averaged over
eleven experiments involving pairs of hinting
applications, and with data striped over one to ten
disks, TIPTOE delivers 7\% lower execution time than
LRU-SP. Where the computation and I/O demands of each
experiment are closely matched, in a two-disk array,
TIPTOE delivers 18\% lower execution time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glass:1997:APR,
author = "Gideon Glass and Pei Cao",
title = "Adaptive page replacement based on memory reference
behavior",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "115--126",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258681",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As disk performance continues to lag behind that of
memory systems and processors, virtual memory
management becomes increasingly important for overall
system performance. In this paper we study the page
reference behavior of a collection of memory-intensive
applications, and propose a new virtual memory page
replacement algorithm, SEQ. SEQ detects long sequences
of page faults and applies most-recently-used
replacement to those sequences. Simulations show that
for a large class of applications, SEQ performs close
to the optimal replacement algorithm, and significantly
better than Least-Recently-Used (LRU). In addition, SEQ
performs similarly to LRU for applications that do not
exhibit sequential faulting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Voelker:1997:MSL,
author = "Geoffrey M. Voelker and Herv{\'e} A. Jamrozik and Mary
K. Vernon and Henry M. Levy and Edward D. Lazowska",
title = "Managing server load in global memory systems",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "127--138",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258682",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "New high-speed switched networks have reduced the
latency of network page transfers significantly below
that of local disk. This trend has led to the
development of systems that use network-wide memory, or
{\em global\/} memory, as a cache for virtual memory
pages or file blocks. A crucial issue in the
implementation of these global memory systems is the
selection of the target nodes to receive replaced
pages. Current systems use various forms of an
approximate global LRU algorithm for making these
selections. However, using age information alone can
lead to suboptimal performance in two ways. First,
workload characteristics can lead to uneven
distributions of old pages across servers, causing
increased contention delays. Second, the global memory
traffic imposed on a node can degrade the performance
of local jobs on that node. This paper studies the
potential benefit and the potential harm of using load
information, in addition to age information, in global
memory replacement policies. Using an analytic queueing
network model, we show the extent to which server load
can degrade remote memory latency and how load
balancing solves this problem. Load balancing requests
can cause the system to deviate from the global LRU
replacement policy, however. Using trace-driven
simulation, we study the impact on application
performance of deviating from the LRU replacement
policy. We find that deviating from strict LRU, even
significantly for some applications, does not affect
application performance. Based upon these results, we
conclude that global memory systems can gain
substantial benefit from load balancing requests with
little harm from suboptimal replacement decisions.
Finally, we illustrate the use of the intuition gained
from the model and simulation experiments by proposing
a new family of algorithms that incorporate load
considerations as well as age information in global
memory replacement decisions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Woodward:1997:SLB,
author = "Michael E. Woodward",
title = "Size-limited batch movement in product-form closed
discrete-time queueing networks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "139--146",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258683",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Existing models for product-form closed discrete-time
queueing networks with batch movement of customers
implicitly assume that batch sizes are unrestricted. In
many practical modelling situations however, it is
necessary to impose restrictions on the batch sizes,
and this paper examines the repercussions of such
restrictions on the product-form properties of the
networks. It is shown that when batch sizes are
restricted independently then, in general, the
resulting networks cannot have a product-form
equilibrium distribution. Sufficient conditions to
retain a product-form are derived in the cases when
batch sizes are either correlated or depend on the
state of the network. Examples of applying the results
to obtain product-form networks with both correlated
and state dependent batch movement are given.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:1997:BPM,
author = "Leana Golubchik and John C. S. Lui",
title = "Bounding of performance measures for a threshold-based
queueing system with hysteresis",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "147--157",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258684",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider a $K$-server
threshold-based queueing system with hysteresis in
which the number of servers, employed for servicing
customers, is governed by a {\em forward threshold\/}
vector $ F = (F_1, F_2, \ldots {}, F_{K - 1})$ (where $
F_1 < F_2 < \cdots < F_{K - 1}$) and a {\em reverse threshold\/}
vector $ R = (R_1, R_2, \ldots {}, R_{K - 1})$ (where $
R_1 < R_2 < \cdots < R_{K - 1}$). There are many applications
where a threshold-based queueing system can be of great
use. The main motivation for using a threshold-based
approach in such applications is that they incur
significant server setup, usage, and removal costs.
And, as in most practical situations, an important
concern is not only the system performance but rather
its cost/performance ratio. The motivation for use of
hysteresis is to control the cost during momentary
fluctuations in workload. An important and
distinguishing characteristic of our work is that in
our model we consider the {\em time to add a server to
be non-negligible.\/} This is a more accurate model,
for many applications, than previously considered in
other works. Our main goal in this work is to develop
an efficient method for computing the steady state
probabilities of a multi-server threshold queueing
system with hysteresis, which will, in turn, allow
computation of various performance measures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lehoczky:1997:URT,
author = "John P. Lehoczky",
title = "Using real-time queueing theory to control lateness in
real-time systems",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "158--168",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258685",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents {\em real-time queueing theory}, a
new theory which embeds the ability of real-time
scheduling theory to determine whether task timing
requirements are met into the context of queueing
models. Specifically, this paper extends the analysis
developed in Lehoczky [9] to the GI/M/1 case. The paper
also applies these models to study queue control
strategies which can control customer lateness.
Arriving customers have deadlines drawn from a general
deadline distribution. The state variable for the
queueing system must include the number in the queue
(with supplementary variables as needed to create a
Markov model) and the {\em lead-time\/} (deadline minus
current time) of each customer; thus the state space is
infinite dimensional. One can represent the state of
the system as a measure on the real line and can
represent that measure by its Fourier transform. Thus,
a real-time queueing system can be characterized as a
Markov process evolving on the space of Fourier
transforms, and this paper presents a characterization
of the instantaneous simultaneous lead-time profile of
all the customers in the queue. This profile is
complicated; however, in the heavy traffic case, a
simple description of the lead-time profile emerges,
namely that the lead-time profile behaves like a
Brownian motion evolving on a particular manifold of
Fourier transforms; the manifold depending upon the
queue discipline and the customer deadline
distributions. This approximation is very accurate when
compared with simulations. Real-time queueing theory
focuses on how well a particular queue discipline meets
customer timing requirements, and focuses on the
dynamic rather than the equilibrium behavior of the
system. As such, it offers the potential to study
control strategies to ensure that customers meet their
deadlines. This paper illustrates the analysis and
performance evaluation for certain queue control
strategies. Generalizations to more complicated models
and to queueing networks are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nahum:1997:CBN,
author = "Erich Nahum and David Yates and Jim Kurose and Don
Towsley",
title = "Cache behavior of network protocols",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "169--180",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258686",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a performance study of memory
reference behavior in network protocol processing,
using an Internet-based protocol stack implemented in
the $x$-kernel running in user space on a MIPS
R4400-based Silicon Graphics machine. We use the
protocols to drive a validated execution-driven
architectural simulator of our machine. We characterize
the behavior of network protocol processing, deriving
statistics such as cache miss rates and percentage of
time spent waiting for memory. We also determine how
sensitive protocol processing is to the architectural
environment, varying factors such as cache size and
associativity, and predict performance on future
machines. We show that network protocol cache behavior
varies widely, with miss rates ranging from 0 to 28
percent, depending on the scenario. We find instruction
cache behavior has the greatest effect on protocol
latency under most cases, and that cold cache behavior
is very different from warm cache behavior. We
demonstrate the upper bounds on performance that can be
expected by improving memory behavior, and the impact
of features such as associativity and larger cache
sizes. In particular, we find that TCP is more
sensitive to cache behavior than UDP, gaining larger
benefits from improved associativity and bigger caches.
We predict that network protocols will scale well with
CPU speeds in the future.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Knightly:1997:SMR,
author = "Edward W. Knightly",
title = "Second moment resource allocation in multi-service
networks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "181--191",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258687",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A crucial problem for the efficient design and
management of integrated services networks is how to
best allocate network resources for heterogeneous and
bursty traffic streams in multiplexers that support
prioritized service disciplines. In this paper, we
introduce a new approach for determining per-connection
performance parameters such as delay-bound violation
probability and loss probability in multi-service
networks. The approach utilizes a traffic
characterization consisting of the variances of a
stream's rate distribution over multiple interval
lengths, which captures its burstiness properties and
autocorrelation structure. From this traffic
characterization, we provide a simple and efficient
resource allocation algorithm by deriving stochastic
delay-bounds for static priority schedulers and
employing a Gaussian approximation over intervals. To
evaluate the scheme, we perform trace-driven simulation
experiments with long traces of MPEG-compressed video
and show that our approach is accurate enough to
capture most of the inherent statistical multiplexing
gain, achieving average network utilizations of up to
90\% for these traces and substantially outperforming
previous `effective bandwidth' techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krunz:1997:CVM,
author = "Marwan Krunz and Satish K. Tripathi",
title = "On the characterization of {VBR MPEG} streams",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "192--202",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a comprehensive model for variable-bit-rate
MPEG video streams. This model captures the bit-rate
variations at multiple time scales. Long-term
variations are captured by incorporating scene changes,
which are most noticeable in the fluctuations of $I$
frames. The size of an $I$ frame is modeled by the sum
of two random components: a scene-related component and
an AR(2) component that accounts for the fluctuations
within a scene. Two random processes of {\em i.i.d.\/}
rvs are used to model the sizes of $P$ and $B$
frames, respectively. The complete model is then
obtained by intermixing the three sub-models according
to a given GOP pattern. It is shown that the composite
model exhibits long-range dependence (LRD) in the sense
that its autocorrelation function is non-summable. The
LRD behavior is caused by the repetitive GOP pattern
which induces periodic cross-correlations between
different types of frames. Using standard statistical
methods, we successfully fit our model to several
empirical video traces. We then study the queueing
performance for video traffic at a statistical
multiplexer. The results show that the model is
sufficiently accurate in predicting the queueing
performance for real video streams.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smith:1997:FSA,
author = "Keith A. Smith and Margo I. Seltzer",
title = "File system aging --- increasing the relevance of file
system benchmarks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "203--213",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarks are important because they provide a means
for users and researchers to characterize how their
workloads will perform on different systems and
different system architectures. The field of file
system design is no different from other areas of
research in this regard, and a variety of file system
benchmarks are in use, representing a wide range of the
different user workloads that may be run on a file
system. A realistic benchmark, however, is only one of
the tools that is required in order to understand how a
file system design will perform in the real world. The
benchmark must also be executed on a realistic file
system. While the simplest approach may be to measure
the performance of an empty file system, this
represents a state that is seldom encountered by real
users. In order to study file systems in more
representative conditions, we present a methodology for
aging a test file system by replaying a workload
similar to that experienced by a real file system over
a period of many months, or even years. Our aging tools
allow the same aging workload to be applied to multiple
versions of the same file system, allowing scientific
evaluation of the relative merits of competing file
system designs. In addition to describing our aging
tools, we demonstrate their use by applying them to
evaluate two enhancements to the file layout policies
of the UNIX fast file system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brown:1997:OSB,
author = "Aaron B. Brown and Margo I. Seltzer",
title = "Operating system benchmarking in the wake of {\tt
lmbench}: a case study of the performance of {NetBSD}
on the {Intel x86} architecture",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "214--224",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258690",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The {\tt lmbench} suite of operating system
microbenchmarks provides a set of portable programs for
use in cross-platform comparisons. We have augmented
the {\tt lmbench} suite to increase its flexibility and
precision, and to improve its methodological and
statistical operation. This enables the detailed study
of interactions between the operating system and the
hardware architecture. We describe modifications to
{\tt lmbench}, and then use our new benchmark suite,
{\tt hbench:OS}, to examine how the performance of
operating system primitives under NetBSD has scaled
with the processor evolution of the Intel x86
architecture. Our analysis shows that off-chip memory
system design continues to influence operating system
performance in a significant way and that key design
decisions (such as suboptimal choices of DRAM and cache
technology, and memory-bus and cache coherency
protocols) can essentially nullify the performance
benefits of the aggressive execution core and
sophisticated on-chip memory system of a modern
processor such as the Intel Pentium Pro.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "See long rebuttal in {\tt hbench-REBUTTAL} in
\url{http://bitmover.com/lmbench/} source code.",
}
@Article{Acharya:1997:UEI,
author = "Anurag Acharya and Guy Edjlali and Joel Saltz",
title = "The utility of exploiting idle workstations for
parallel computation",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "225--234",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258691",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine the utility of exploiting
idle workstations for parallel computation. We attempt
to answer the following questions. First, given a
workstation pool, for what fraction of time can we
expect to find a cluster of $k$ workstations available?
This provides an estimate of the opportunity for
parallel computation. Second, how stable is a cluster
of free machines and how does the stability vary with
the size of the cluster? This indicates how frequently
a parallel computation might have to stop for adapting
to changes in processor availability. Third, what is
the distribution of workstation idle-times? This
information is useful for selecting workstations to
place computation on. Fourth, how much benefit can a
user expect? To state this in concrete terms, if I have
a pool of size $S$, how big a parallel machine should I
expect to get for free by harvesting idle machines?
Finally, how much benefit can be achieved on a real
machine and how hard does a parallel programmer have to
work to make this happen? To answer the
workstation-availability questions, we have analyzed
14-day traces from three workstation pools. To
determine the equivalent parallel machine, we have
simulated the execution of a group of well-known
parallel programs on these workstation pools. To gain
an understanding of the practical problems, we have
developed the system support required for adaptive
parallel programs and have used it to build an adaptive
parallel computational fluid dynamics application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qin:1997:PEC,
author = "Xiaohan Qin and Jean-Loup Baer",
title = "A performance evaluation of cluster architectures",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "237--247",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates the performance of
shared-memory cluster-based architectures where each
cluster is a shared-bus multiprocessor augmented with a
protocol processor maintaining cache coherence across
clusters. For a given number of processors, sixteen in
this study, we evaluate the performance of various
cluster configurations. We also consider the impact of
adding a remote shared cache in each cluster. We use
Mean Value Analysis to estimate the cache miss
latencies of various types and the overall execution
time. The service demands of shared resources are
characterized in detail by examining the sub-requests
issued in resolving cache misses. In addition to the
architectural system parameters and the service demands
on resources, the analytical model needs parameters
pertinent to applications. The latter, in particular
cache miss profiles, are obtained by trace-driven
simulation of three benchmarks. Our results show that
without remote caches the performance of cluster-based
architectures is mixed. In some configurations, the
negative effects of the longer latency of inter-cluster
misses and of the contention on the protocol processor
are too large to counter-balance the lower contention
on the data buses. For two out of the three
applications best results are obtained when the system
has clusters of size 2 or 4. The cluster-based
architectures with remote caches consistently
outperform the single bus system for all 3
applications. We also exercise the model with
parameters reflecting the current trend in technology
making the processor relatively faster than the bus and
memory. Under these new conditions, our results show a
clear performance advantage for the cluster-based
architectures, with or without remote caches, over
single bus systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiueh:1997:DED,
author = "Tzi-cker Chiueh and Srinidhi Varadarajan",
title = "Design and evaluation of a {DRAM}-based shared memory
{ATM} switch",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "248--259",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em Beluga\/} is a single-chip switch architecture
specifically targeted at local area ATM networks, and
it features three architectural innovations. First, an
interconnection hierarchy composed of multiple
switching fabrics is built into the chip to provide
both low-latency cell transfer when the traffic is
light and low cell drop rate under heavy load.
Secondly, to improve silicon efficiency, Beluga is
based on shared memory architecture, and the buffers
are implemented using DRAM rather than SRAM technology.
Heavy interleaving and selective invalidation are used
to address long latency and periodic refreshing
problems, respectively. Thirdly, Beluga supports
multicast with minimal physical bit replication. It
also separates support for unicast and multicast cells
to optimize for the common case, where multicast cells
occur infrequently. This paper describes the design
details of {\em Beluga\/} and the results of a
comprehensive simulation study to quantify the
performance impact of each of its architectural
features. The most important result from this research
is that DRAM-based buffer implementation significantly
reduces the cell-drop rate during heavy load while
exhibiting almost identical cell latency to SRAM-based
implementation during light load. Therefore, we believe
DRAM makes an attractive alternative for switch buffer
implementation, especially for single-chip architecture
such as {\em Beluga.\/}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Song:1997:ERC,
author = "Junehwa Song and Asit Dan and Dinkar Sitaram",
title = "Efficient retrieval of composite multimedia objects in
the {JINSIL} distributed system",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "260--271",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258695",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a distributed environment, presentation of
structured, composite multimedia information poses new
challenges in dealing with variable bandwidth (BW)
requirement and synchronization of media data objects.
The detailed knowledge of BW requirement obtained by
analyzing the document structure can be used to create
a prefetch schedule that results in efficient
utilization of system resources. A distributed
environment consists of various system components that
are either dedicated to a client or shared across
multiple clients. Shared system components could
benefit from {\em Fine Granularity Advanced Reservation
(FGAR)\/} of resources based on true BW requirement.
Prefetching by utilizing advance knowledge of BW
requirement can further improve resource utilization.
In this paper, we describe the JINSIL retrieval system
that takes into account the available bandwidth and
buffer resources and the nature of sharing in each
component on the delivery path. It reshapes BW
requirement, creates prefetch schedule for efficient
resource utilization in each component, and reserves
necessary BW and buffer. We also consider good choices
for placement of prefetch buffers across various system
components.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gibson:1997:FSS,
author = "Garth A. Gibson and David F. Nagle and Khalil Amiri
and Fay W. Chang and Eugene M. Feinberg and Howard
Gobioff and Chen Lee and Berend Ozceri and Erik Riedel
and David Rochberg and Jim Zelenka",
title = "File server scaling with network-attached secure
disks",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "272--284",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258612.258696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "By providing direct data transfer between storage and
client, network-attached storage devices have the
potential to improve scalability for existing
distributed file systems (by removing the server as a
bottleneck) and bandwidth for new parallel and
distributed file systems (through network striping and
more efficient data paths). Together, these advantages
influence a large enough fraction of the storage market
to make commodity network-attached storage feasible.
Realizing the technology's full potential requires
careful consideration across a wide range of file
system, networking and security issues. This paper
contrasts two network-attached storage
architectures---(1) Networked SCSI disks (NetSCSI) are
network-attached storage devices with minimal changes
from the familiar SCSI interface, while (2)
Network-Attached Secure Disks (NASD) are drives that
support independent client access to drive object
services. To estimate the potential performance
benefits of these architectures, we develop an analytic
model and perform trace-driven replay experiments based
on AFS and NFS traces. Our results suggest that NetSCSI
can reduce file server load during a burst of NFS or
AFS activity by about 30\%. With the NASD architecture,
server load (during burst activity) can be reduced by a
factor of up to five for AFS and up to ten for NFS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsiolis:1997:GGC,
author = "Athanassios K. Tsiolis and Mary K. Vernon",
title = "Group-guaranteed channel capacity in multimedia
storage servers",
journal = j-SIGMETRICS,
volume = "25",
number = "1",
pages = "285--297",
month = jun,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/258623.258697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:23:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the open questions in the design of multimedia
storage servers is in what order to serve incoming
requests. Given the capability provided by the disk
layout and scheduling algorithms to serve multiple
streams simultaneously, improved request scheduling
algorithms can reduce customer waiting times. This
results in better service and/or lower customer loss.
In this paper we define a new class of request
scheduling algorithms, called Group-Guaranteed Server
Capacity (GGSC), that preassign server channel capacity
to groups of objects. We also define a particular
formal method for computing the assigned capacities to
achieve a given performance objective. We observe that
the FCFS policy can provide the precise time of service
to incoming customer requests. Under this assumption,
we compare the performance of one of the new GGSC
algorithms, GGSCW-FCFS, against FCFS and against two
other recently
proposed scheduling algorithms: Maximum Factored Queue
length (MFQ), and the FCFS-n algorithm that preassigns
capacity only to each of the $n$ most popular objects.
The algorithms are compared for both {\em competitive
market\/} and {\em captured audience\/} environments.
Key findings of the algorithm comparisons are that: (1)
FCFS-n has no advantage over FCFS if FCFS gives time of
service guarantees to arriving customers, (2) FCFS and
GGSCW-FCFS are superior to MFQ for both competitive and
captive audience environments, (3) for competitive
servers that are configured for customer loss less than
10\%, FCFS is superior to all other algorithms examined
in this paper, and (4) for captive audience
environments that have objects with variable playback
length, GGSCW-FCFS is the most promising of the
policies considered in this paper. The conclusions for
FCFS-n and MFQ differ from previous work because we
focus on competitive environments with customer loss
under 10\%, we assume FCFS can provide time of service
guarantees to all arriving customers, and we consider
the distribution of customer waiting time as well as
the average waiting time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Muntz:1997:SIM,
author = "Richard Muntz",
title = "Special Issue on Multimedia Storage Systems",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "2--2",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.581190",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ozden:1997:AIM,
author = "Banu {\"O}zden and Rajeev Rastogi and Avi
Silberschatz",
title = "Architecture issues in multimedia storage systems",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "3--12",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262394",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Next generation storage systems will need to provide
support for both textual data and other types of
multimedia data (e.g., images, video, audio). These two
types of data differ in their characteristics, and
hence require different techniques for their
organization and management. In this paper, we provide
an overview of (1) how storage systems can be
architectured to support multimedia data, and (2) what
are the main challenges in devising new algorithms to
manage multimedia data. In order to provide rate
guarantees for continuous media data, an admission
control scheme must be employed that determines, for
each client, whether there are sufficient resources
available to service that client. To maximize the
number of clients that can be admitted concurrently,
the various system resources must be allocated and
scheduled carefully. In terms of disks, we use
algorithms for retrieving/storing data from/to disks
that reduce seek latency time and eliminate rotational
delay, thereby providing high throughput. In terms of
main-memory, we use buffer management schemes that
exploit the sequential access patterns for continuous
media data, thereby resulting in efficient replacement
of buffer pages from the cache.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shi:1997:BSV,
author = "Weifeng Shi and Shahram Ghandeharizadeh",
title = "Buffer sharing in video-on-demand servers",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "13--20",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262396",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a buffer sharing technique that
strikes a balance between the use of disk bandwidth and
memory in order to maximize the performance of a
video-on-demand server. We make the key observation
that the configuration parameters of the system should
be independent of the physical characteristics of the
data (e.g., popularity of a clip). Instead, the
configuration parameters are fixed and our strategy
adjusts itself dynamically at run-time to support a
pattern of access to the video clips.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:1997:ITD,
author = "Leana Golubchik",
title = "On issues and tradeoffs in design of fault tolerant
{VOD} servers",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "21--28",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent technological advances in digital signal
processing, data compression techniques, and high speed
communication networks have made Video-on-Demand (VOD)
servers feasible. A challenging task in such systems is
servicing multiple clients simultaneously while
satisfying real-time requirements of continuous
delivery of objects at specified rates. To accomplish
these tasks and realize economies of scale associated
with servicing a large user population, a VOD server
requires a large disk subsystem. Although a single disk
is fairly reliable, a large disk farm can have an
unacceptably high probability of disk failure.
Furthermore, due to real-time constraints, the
reliability and availability requirements of VOD
systems are even more stringent than those of
traditional information systems. In this paper we
discuss some of the main issues and tradeoffs
associated with providing fault tolerance in multidisk
VOD systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Muntz:1997:RRT,
author = "Richard Muntz and Jose Renato Santos and Steve
Berson",
title = "{RIO}: a real-time multimedia object server",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "29--35",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A multimedia object server must be ready to handle a
variety of media object types (video, audio, image, 3D
interactive, etc.) as well as non real-time workload.
Even when a homogeneous set of object types are
maintained in the store (e.g., all videos) the storage
system workload is generally quite variable due to the
need to provide, for example, VCR functionality,
multiple playout rates, different resolution levels for
the same objects, etc. Attempting to carefully layout
data and optimally schedule delivery to meet
just-in-time delivery constraints is very difficult in
the face of this heterogeneous workload. Our approach
to the unpredictability of the I/O workload is to
randomize the allocation of disk blocks. This turns all
workloads into the same uniformly random access pattern
and thus gives one problem to deal with. The main
disadvantage of this approach is that statistical
variation can result in short term imbalances in disk
utilization which in turn, cause large variances in
latencies. Our approach to this problem is to introduce
limited redundancy and asynchronous scheduling for
short term load balancing. This approach is being
implemented in the RIO (Random I/O) multimedia object
server. The RIO multimedia object server provides
applications a guaranteed rate of storage access with
bounded delay even at very high ({\em > 90\%\/}) disk
utilization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Colajanni:1997:ATS,
author = "Michele Colajanni and Philip S. Yu",
title = "Adaptive {TTL} schemes for load balancing of
distributed {Web} servers",
journal = j-SIGMETRICS,
volume = "25",
number = "2",
pages = "36--42",
month = sep,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/262391.262401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:34 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With ever increasing web traffic, a distributed Web
system can provide scalability and flexibility to cope
with growing client demands. Load balancing algorithms
to spread the load across multiple Web servers are
crucial to achieve the scalability. Various {\em domain
name server\/} (DNS) based schedulers have been
proposed in the literature, mainly for multiple
homogeneous servers. DNS provides (logical) host name
to IP-address mapping (i.e., the server assignment),
but the mapping is not done for each server access.
This is because the address mapping is cached for a
time-to-live (TTL) period to reduce network traffic.
The presence of heterogeneous Web servers not only
increases the complexity of the DNS scheduling problem,
but also makes previously proposed algorithms for
homogeneous distributed systems such as round robin not
directly applicable. This leads us to propose new
policies, called {\em adaptive TTL\/} algorithms, that
take both the uneven distribution of client request
rates and heterogeneity of Web servers into account to
adaptively set the TTL value for each address mapping
request. Extensive simulation results show that these
strategies are effective in balancing load among
geographically distributed heterogeneous Web servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kotz:1997:SIP,
author = "David Kotz",
title = "Special Issue on Parallel {I/O} Systems",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "2--2",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.581191",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cormen:1997:CFP,
author = "Thomas H. Cormen and David M. Nicol",
title = "Out-of-core {FFTs} with parallel disks",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "3--12",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We examine approaches to computing the Fast Fourier
Transform (FFT) when the data size exceeds the size of
main memory. Analytical and experimental evidence shows
that relying on native virtual memory with demand
paging can yield extremely poor performance. We then
present approaches based on minimizing I/O costs with
the Parallel Disk Model (PDM). Each of these approaches
explicitly plans and performs disk accesses so as to
minimize their number.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papadopouli:1997:SVV,
author = "Maria Papadopouli and Leana Golubchik",
title = "Support of {VBR} video streams under disk bandwidth
limitations",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "13--20",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present scheduling techniques for a {\em
scalable\/} video server in a multi-disk environment.
The scheduling of the retrieval is introduced in a
dynamic rate-distortion context that exploits both the
multiresolution property of video and replication
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bordawekar:1997:EEH,
author = "Rajesh Bordawekar and Steven Landherr and Don Capps
and Mark Davis",
title = "Experimental evaluation of the {Hewlett--Packard}
{Exemplar} file system",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "21--28",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This article presents results from an experimental
evaluation study of the HP Exemplar file system. Our
experiments consist of simple micro-benchmarks that
study the impact of various factors on the file system
performance. These factors include I/O request/buffer
sizes, vectored/non-vectored access patterns,
read-ahead policies, multi-threaded (temporally
irregular) requests, and architectural issues (cache
parameters, NUMA behavior, etc.). Experimental results
indicate that the Exemplar file system provides high
I/O bandwidth, both for single- and multi-threaded
applications. The buffer cache, with prioritized buffer
management and large buffer sizes, is effective in
exploiting temporal and spatial access localities. The
performance of non-contiguous accesses can be improved
by either using vectored I/O interfaces or tuning the
read-ahead facilities. The file system performance
depends on the relative locations of the computing
threads and the file system, and also on various
Exemplar design parameters such as the NUMA
architecture, TLB/data cache management and paging
policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rochberg:1997:PNE,
author = "David Rochberg and Garth Gibson",
title = "Prefetching over a network: early experience with
{CTIP}",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "29--36",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss CTIP, an implementation of a network
filesystem extension of the successful TIP informed
prefetching and cache management system. Using a
modified version of TIP in NFS client machines (and
unmodified NFS servers), CTIP takes advantage of
application-supplied hints that disclose the
application's future read accesses. CTIP uses these
hints to aggressively prefetch file data from an NFS
file server and to make better local cache replacement
decisions. This prefetching hides disk latency and
exposes storage parallelism. Preliminary measurements
show that CTIP can reduce execution time by a ratio
comparable to that obtained with local TIP over a suite
of I/O-intensive hinting applications. (For four disks,
the reductions in execution time range from 17\% to
69\%). If local TIP execution requires that data first
be loaded from remote storage into a local scratch
area, then CTIP execution is significantly faster than
the aggregate time for loading the data and executing.
Additionally, our measurements show that the benefit of
CTIP for hinting applications improves in the face of
competition from other clients for server resources. We
conclude with an analysis of the remaining problems
with using unmodified NFS servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menon:1997:DVD,
author = "Jai Menon and Kent Treiber",
title = "{Daisy}: virtual-disk hierarchical storage manager",
journal = j-SIGMETRICS,
volume = "25",
number = "3",
pages = "37--44",
month = dec,
year = "1997",
CODEN = "????",
DOI = "https://doi.org/10.1145/270900.270908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:24:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:1998:SIT,
author = "David M. Nicol",
title = "Special Issue on the {Telecommunications Description
Language}",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "3--3",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.581192",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perumalla:1998:TLM,
author = "Kalyan Perumalla and Richard Fujimoto and Andrew
Ogielski",
title = "{TED} --- a language for modeling telecommunication
networks",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "4--11",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TeD is a language designed mainly for modeling
telecommunication networks. The TeD language
specification is separated into two parts --- (1) a
{\em meta\/} language (2) an {\em external\/} language.
The meta language specification is concerned with the
high-level description of the structural and behavioral
interfaces of various network elements. The external
language specification is concerned with the detailed
low-level description of the implementation of the
structure and behavior of the network elements. In this
document, we present an introduction to the TeD
language, along with a brief tutorial using an example
model of a simple ATM multiplexer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Perumalla:1998:TMA,
author = "Kalyan Perumalla and Matthew Andrews and Sandeep
Bhatt",
title = "{TED} models for {ATM} internetworks",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "12--21",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe our experiences designing and implementing
a virtual PNNI network testbed. The network elements
and signaling protocols modeled are consistent with the
ATM Forum {\em PNNI\/} draft specifications. The models
will serve as a high-fidelity testbed of the transport
and network layers for simulation-based studies of the
scalability and performance of PNNI protocols. Our
models are written in the new network description
language
TeD which offers two advantages. First, the testbed
design is transparent; the model descriptions are
developed separately from, and are independent of, the
simulation-specific code. Second, TeD is compiled to
run with the GTW (Georgia Tech Time Warp) simulation
engine which is supported on shared-memory
multiprocessors. Therefore, we directly obtain the
advantages of parallel simulation. This is one of the
first complex tests of the TeD modeling and simulation
software system. The feedback from our experiences
resulted in some significant improvements to the
simulation software. The resulting {\em PNNI\/} models
are truly transparent and the performance of the
simulations is encouraging. We give results from
preliminary simulations of call admission, set-up and
tear-down in sample {\em PNNI\/} networks consisting of
two hundred nodes and over three hundred edges. The
time to simulate ten thousand call requests decreases
significantly with the number of processors; we observe
a speedup factor of 5.05 when 8 processors are employed
compared to a single processor. Our initial
implementations demonstrate the advantages of TeD for
parallel simulations of large-scale networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rubenstein:1998:OPS,
author = "Dan Rubenstein and Jim Kurose and Don Towsley",
title = "Optimistic parallel simulation of reliable multicast
protocols",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "22--29",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel discrete-event simulation offers the promise
of harnessing the computational power of multiple
processors in order to reduce the time needed for
simulation-based performance studies. In this paper, we
investigate the use of {\em optimistic parallel
simulation techniques\/} in simulating reliable
multicast communication network protocols. Through
empirical studies (using the TeD simulation programming
language, the Georgia Tech time warp simulator, and a
12-processor SGI Challenge), we find that these
parallelized simulations can run noticeably faster than
a uniprocessor simulation and, in a number of cases,
can make effective use of parallel resources. These
results are somewhat surprising because reliable
multicast protocols require considerable communication
(and hence synchronization) among different network
entities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Panchal:1998:PSW,
author = "Jignesh Panchal and Owen Kelly and Jie Lai and Narayan
Mandayam and Andrew T. Ogielski and Roy Yates",
title = "Parallel simulations of wireless networks with {TED}:
radio propagation, mobility and protocols",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "30--39",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe the
TeD/C++ implementation of {\em WiPPET}, a parallel
simulation testbed for mobile wireless networks. In
this article we emphasize the techniques for modeling
of radio propagation (long- and short-scale fading and
interference) and protocols for integrated radio
resource management in mobile wireless voice networks.
The testbed includes the standards-based AMPS, NA-TDMA
and GSM protocols, and several research-oriented
protocol families.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Premore:1998:TNT,
author = "Brian J. Premore and David M. Nicol",
title = "Transformation of {\em ns\/} {TCP} models to {TED}",
journal = j-SIGMETRICS,
volume = "25",
number = "4",
pages = "40--48",
month = mar,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/274084.274089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:03 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers problems that arise when
transforming TCP models developed using the {\em ns\/}
simulator, to the TeD meta-language. The raison
d'{\^e}tre for this project is to evaluate the
potential of TeD as the target of an automated
simulation model transformation system, so as to
exploit the considerable existing modeling work that
has already been conducted using {\em ns}. By
transforming {\em ns\/} models to TeD we hope to
provide high performance parallel simulation to
detailed and accurate network models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Srinivasan:1998:FIL,
author = "V. Srinivasan and George Varghese",
title = "Faster {IP} lookups using controlled prefix
expansion",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "1--10",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277863",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet (IP) address lookup is a major bottleneck in
high performance routers. IP address lookup is
challenging because it requires {\em a longest matching
prefix\/} lookup. It is compounded by increasing
routing table sizes, increased traffic, higher speed
links, and the migration to 128 bit IPv6 addresses. We
describe how IP lookups can be made faster using a new
technique called {\em controlled prefix expansion}.
Controlled prefix expansion, together with optimization
techniques based on dynamic programming, can be used to
improve the speed of the best known IP lookup
algorithms by at least a factor of two. When applied to
trie search, our techniques provide a range of
algorithms whose performance can be tuned. For example,
with 1 MB of L2 cache, trie search of the MaeEast
database with 38,000 prefixes can be done in a worst
case search time of 181 nsec, a worst case
insert/delete time of 2.5 msec, and an average
insert/delete time of 4 usec. Our actual experiments
used 512 KB L2 cache to obtain a worst-case search time
of 226 nsec, a worst-case insert/delete time
of 2.5 msec and an average insert/delete time of 4
usec. We also describe how our techniques can be used
to improve the speed of binary search on prefix lengths
to provide a scalable solution for IPv6. Our approach
to algorithm design is based on measurements using the
VTune tool on a Pentium to obtain dynamic clock cycle
counts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paxson:1998:CMP,
author = "Vern Paxson",
title = "On calibrating measurements of packet transit times",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "11--21",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss the problem of detecting errors in
measurements of the total delay experienced by packets
transmitted through a wide-area network. We assume that
we have measurements of the transmission times of a
group of packets sent from an originating host, {\em
A}, and a corresponding set of measurements of their
arrival times at their destination host, {\em B},
recorded by two separate clocks. We also assume that we
have a similar series of measurements of packets sent
from $B$ to $A$ (as might occur when recording a TCP
connection), but we do not assume that the clock at $A$
is synchronized with the clock at {\em B}, nor that
they run at the same frequency. We develop robust
algorithms for detecting abrupt adjustments to either
clock, and for estimating the relative skew between the
clocks. By analyzing a large set of measurements of
Internet TCP connections, we find that both clock
adjustments and relative skew are sufficiently common
that failing to detect them can lead to potentially
large errors when analyzing packet transit times. We
further find that synchronizing clocks using a network
time protocol such as NTP does not free them from such
errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:1998:MCP,
author = "Randolph Y. Wang and Arvind Krishnamurthy and Richard
P. Martin and Thomas E. Anderson and David E. Culler",
title = "Modeling communication pipeline latency",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "22--32",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study how to minimize the latency of
a message through a network that consists of a number
of store-and-forward stages. This research is
especially relevant for today's low overhead
communication systems that employ dedicated processing
elements for protocol processing. We develop an
abstract pipeline model that reveals a crucial
performance tradeoff involving the effects of the
overhead of the bottleneck stage and the bandwidth of
the remaining stages. We exploit this tradeoff to
develop a suite of fragmentation algorithms designed to
minimize message latency. We also provide an
experimental methodology that enables the construction
of customized pipeline algorithms that can adapt to the
specific system characteristics and application
workloads. By applying this methodology to the
Myrinet-GAM system, we have improved its latency by up
to 51\%. Our theoretical framework is also applicable
to pipelined systems beyond the context of high speed
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Voelker:1998:ICP,
author = "Geoffrey M. Voelker and Eric J. Anderson and Tracy
Kimbrel and Michael J. Feeley and Jeffrey S. Chase and
Anna R. Karlin and Henry M. Levy",
title = "Implementing cooperative prefetching and caching in a
globally-managed memory system",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "33--43",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents {\em cooperative prefetching and
caching\/} --- the use of network-wide global resources
(memories, CPUs, and disks) to support prefetching and
caching in the presence of hints of future demands.
Cooperative prefetching and caching effectively unites
disk-latency reduction techniques from three lines of
research: prefetching algorithms, cluster-wide memory
management, and parallel I/O. When used together, these
techniques greatly increase the power of prefetching
relative to a conventional (non-global-memory) system.
We have designed and implemented PGMS, a cooperative
prefetching and caching system, under the Digital Unix
operating system running on a 1.28 Gb/sec
Myrinet-connected cluster of DEC Alpha workstations.
Our measurements and analysis show that by using
available global resources, cooperative prefetching can
obtain significant speedups for I/O-bound programs. For
example, for a graphics rendering application, our
system achieves a speedup of 4.9 over a non-prefetching
version of the same program, and a 3.1-fold improvement
over that program using local-disk prefetching alone.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shenoy:1998:CDS,
author = "Prashant J. Shenoy and Harrick M. Vin",
title = "{Cello}: a disk scheduling framework for next
generation operating systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "44--55",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277871",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present the Cello disk scheduling
framework for meeting the diverse service requirements
of applications. Cello employs a two-level disk
scheduling architecture, consisting of a
class-independent scheduler and a set of class-specific
schedulers. The two levels of the framework allocate
disk bandwidth at two time-scales: the
class-independent scheduler governs the coarse-grain
allocation of bandwidth to application classes, while
the class-specific schedulers control the fine-grain
interleaving of requests. The two levels of the
architecture separate application-independent
mechanisms from application-specific scheduling
policies, and thereby facilitate the co-existence of
multiple class-specific schedulers. We demonstrate that
Cello is suitable for next generation operating systems
since: (i) it aligns the service provided with the
application requirements, (ii) it protects application
classes from one another, (iii) it is work-conserving
and can adapt to changes in work-load, (iv) it
minimizes the seek time and rotational latency overhead
incurred during access, and (v) it is computationally
efficient.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rosti:1998:IPB,
author = "Emilia Rosti and Giuseppe Serazzi and Evgenia Smirni
and Mark S. Squillante",
title = "The impact of {I/O} on program behavior and parallel
scheduling",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "56--65",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277873",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we systematically examine various
performance issues involved in the coordinated
allocation of processor and disk resources in
large-scale parallel computer systems. Models are
formulated to investigate the I/O and computation
behavior of parallel programs and workloads, and to
analyze parallel scheduling policies under such
workloads. These models are parameterized by
measurements of parallel programs, and they are solved
via analytic methods and simulation. Our results
provide important insights into the performance of
parallel applications and resource management
strategies when I/O demands are not negligible.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bajaj:1998:SPU,
author = "Sandeep Bajaj and Lee Breslau and Scott Shenker",
title = "Is service priority useful in networks?",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "66--77",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A key question in the definition of new services for
the Internet is whether to provide a single class of
relaxed real-time service or multiple levels
differentiated by their delay characteristics. In that
context we pose the question: is service priority
useful in networks? We argue that, contrary to some of
our earlier work, to properly address this question one
cannot just consider raw network-centric performance
numbers, such as the delay distribution. Rather, one
must incorporate two new elements into the analysis:
the utility functions of the applications (how
application performance depends on network service),
and the adaptive nature of applications (how
applications react to changing network service). This
last point is especially crucial; modern Internet
applications are designed to tolerate a wide range of
network service quality, and they do so by adapting to
the current network conditions. Most previous
investigations of network performance have neglected to
include this adaptive behavior. In this paper we
present an analysis of service priority in the context
of audio applications embodying these two elements:
utility functions and adaptation. Our investigation is
far from conclusive. The definitive answer to the
question depends on many factors that are outside the
scope of this paper and are, at present, unknowable,
such as the burstiness of future Internet traffic and
the relative offered loads of best-effort and real-time
applications. Despite these shortcomings, our analysis
illustrates this new approach to evaluating network
design decisions, and sheds some light on the
properties of adaptive applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kalampoukas:1998:ITT,
author = "Lampros Kalampoukas and Anujan Varma and K. K.
Ramakrishnan",
title = "Improving {TCP} throughput over two-way asymmetric
links: analysis and solutions",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "78--89",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The sharing of a common buffer by TCP data segments
and acknowledgments in a network or internet has been
known to produce the effect of {\em ack compression\/},
often causing dramatic reductions in throughput. We
study several schemes for improving the performance of
two-way TCP traffic over asymmetric links where the
bandwidths in the two directions may differ
substantially, possibly by many orders of magnitude.
These approaches reduce the effect of ack compression
by carefully controlling the flow of data packets and
acknowledgments. We first examine a scheme where
acknowledgments are transmitted at a higher priority
than data. By analysis and simulation, we show that
prioritizing acks can lead to starvation of the
low-bandwidth connection. Next, we introduce and
analyze a connection-level backpressure mechanism
designed to limit the maximum amount of data buffered
in the outgoing IP queue of the source of the
low-bandwidth connection. We show that this approach,
while minimizing the queueing delay for acks, results
in unfair bandwidth allocation on the slow link.
Finally, our preferred solution separates the acks from
data packets in the outgoing queue, and makes use of a
connection-level bandwidth allocation mechanism to
control their bandwidth shares. We show that this
scheme overcomes the limitations of the previous
approaches, provides isolation, and enables precise
control of the connection throughputs. We present
analytical models of the dynamic behavior of each of
these approaches, derive closed-form expressions for
the expected connection efficiencies in each case, and
validate them with simulation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raman:1998:ABG,
author = "Suchitra Raman and Steven McCanne and Scott Shenker",
title = "Asymptotic behavior of global recovery in {SRM}",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "90--99",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The development and deployment of a large-scale,
wide-area multicast infrastructure in the Internet has
enabled a new family of multi-party, collaborative
applications. Several of these applications, such as
multimedia slide shows, shared whiteboards, and
large-scale multi-player games, require {\em
reliable\/} multicast transport, yet the underlying
multicast infrastructure provides only a best-effort
delivery service. A difficult challenge in the design
of efficient protocols that provide reliable service on
top of the best-effort multicast service is to maintain
acceptable performance as the protocol {\em scales\/}
to very large session sizes distributed across the wide
area. The Scalable, Reliable Multicast (SRM) protocol
[6] is a receiver-driven scheme based on negative
acknowledgments (NACKs) reliable multicast protocol
that uses randomized timers to limit the amount of
protocol overhead in the face of large multicast
groups, but the behavior of SRM at extremely large
scales is not well-understood. In this paper, we use
analysis and simulation to investigate the scaling
behavior of global loss recovery in SRM. We study the
protocol's control-traffic overhead as a function of
group size for various topologies and protocol
parameters, on a set of simple, representative
topologies --- the cone (a variant of a clique), the
linear chain, and the binary tree. We find that this
overhead, as a function of group size, depends strongly
on the topology: for the cone, it is always linear; for
the chain, it is between constant and logarithmic; and
for the tree, it is between constant and linear.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boxma:1998:BPF,
author = "O. J. Boxma and V. Dumas",
title = "The busy period in the fluid queue",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "100--110",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277881",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a fluid queue fed by $N$ on/off sources. It
is assumed that the silence periods of the sources are
exponentially distributed, whereas the activity periods
are generally distributed. The inflow rate of each
source, when active, is at least as large as the
outflow rate of the buffer. We make two contributions
to the performance analysis of this model. Firstly, we
determine the Laplace--Stieltjes transforms of the
distributions of the busy periods that start with an
active period of source $ i, i = 1, \ldots {}, N$, as
the unique solution in $ [0, 1]^N$ of a set of $N$
equations. Thus we also find the Laplace--Stieltjes
transform of the distribution of an arbitrary busy
period. Secondly, we relate the tail behaviour of the
busy period distributions to the tail behaviour of the
activity period distributions. We show that the tails
of all busy period distributions are regularly varying
of index $ - \nu $ iff the heaviest of the tails of the
activity period distributions are regularly varying of
index $ - \nu $. We provide explicit equivalents of the
former in terms of the latter, which show that the
contribution of the sources with lighter associated
tails is equivalent to a simple reduction of the
outflow rate. These results have implications for the
performance analysis of networks of fluid queues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:1998:TLP,
author = "Guang-Liang Li and Jun-Hong Cui and Bo Li and
Fang-Ming Li",
title = "Transient loss performance of a class of finite buffer
queueing systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "111--120",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance-oriented studies typically rely on the
assumption that the stochastic process modeling the
phenomenon of interest is already in steady state. This
assumption is, however, not valid if the life cycle of
the phenomenon under study is not large enough, since
usually a stochastic process cannot reach steady state
unless time evolves towards infinity. Therefore, it is
important to address performance issues in transient
state. Previous work in transient analysis of queueing
systems usually focuses on Markov models. This paper,
in contrast, presents an analysis of transient loss
performance for a class of finite buffer queueing
systems that are not necessarily Markovian. We obtain
closed-form transient loss performance measures. Based
on the loss measures, we compare transient loss
performance against steady-state loss performance and
examine how different assumptions on the arrival
process will affect transient loss behavior of the
queueing system. We also discuss how to guarantee
transient loss performance. The analysis is illustrated
with numerical results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "queueing systems; stochastic modeling; transient loss
performance",
}
@Article{McKinnon:1998:QBA,
author = "Martin W. McKinnon and George N. Rouskas and Harry G.
Perros",
title = "Queueing-based analysis of broadcast optical
networks",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "121--130",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277888",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider broadcast WDM networks operating with
schedules that mask the transceiver tuning latency. We
develop and analyze a queueing model of the network in
order to obtain the queue-length distribution and the
packet loss probability at the transmitting and
receiving side of the nodes. The analysis is carried
out assuming finite buffer sizes, non-uniform
destination probabilities and two-state MMBP traffic
sources; the latter naturally capture the notion of
burstiness and correlation, two important
characteristics of traffic in high-speed networks. We
present results which establish that the performance of
the network is a complex function of a number of system
parameters, including the load balancing and scheduling
algorithms, the number of available channels, and the
buffer capacity. We also show that the behavior of the
network in terms of packet loss probability as these
parameters are varied cannot be predicted without an
accurate analysis. Our work makes it possible to study
the interactions among the system parameters, and to
predict, explain and fine tune the performance of the
network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "discrete-time queueing networks; Markov modulated
Bernoulli process; optical networks; wavelength
division multiplexing",
}
@Article{Bavier:1998:PME,
author = "Andy C. Bavier and A. Brady Montz and Larry L.
Peterson",
title = "Predicting {MPEG} execution times",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "131--140",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277892",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reports on a set of experiments that
measure the amount of CPU processing needed to decode
MPEG-compressed video in software. These experiments
were designed to discover indicators that could be used
to predict how many cycles are required to decode a
given frame. Such predictors can be used to do more
accurate CPU scheduling. We found that by considering
both frame type and size, it is possible to construct a
linear model of MPEG decoding with $ R^2 $ values of
0.97 and higher. Moreover, this model can be used to
predict decoding times at both the frame and packet
level that are almost always accurate to within 25\% of
the actual decode times. This is a surprising result
given the large variability in MPEG decoding times, and
suggests that it is feasible to design systems that
make quality of service guarantees for MPEG-encoded
video.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gribble:1998:SSF,
author = "Steven D. Gribble and Gurmeet Singh Manku and Drew
Roselli and Eric A. Brewer and Timothy J. Gibson and
Ethan L. Miller",
title = "Self-similarity in file systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "141--150",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We demonstrate that high-level file system events
exhibit self-similar behaviour, but only for short-term
time scales of approximately under a day. We do so
through the analysis of four sets of traces that span
time scales of milliseconds through months, and that
differ in the trace collection method, the filesystems
being traced, and the chronological times of the
tracing. Two sets of detailed, short-term file system
trace data are analyzed; both are shown to have
self-similar like behaviour, with consistent Hurst
parameters (a measure of self-similarity) for all file
system traffic as well as individual classes of file
system events. Long-term file system trace data is then
analyzed, and we discover that the traces' high
variability and self-similar behaviour does not persist
across time scales of days, weeks, and months. Using
the short-term trace data, we show that sources of file
system traffic exhibit ON/OFF source behaviour, which
is characterized by highly variably lengthened bursts
of activity, followed by similarly variably lengthened
periods of inactivity. This ON/OFF behaviour is used to
motivate a simple technique for synthesizing a stream
of events that exhibit the same self-similar short-term
behaviour as was observed in the file system traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barford:1998:GRW,
author = "Paul Barford and Mark Crovella",
title = "Generating representative {Web} workloads for network
and server performance evaluation",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "151--160",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277897",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One role for workload generation is as a means for
understanding how servers and networks respond to
variation in load. This enables management and capacity
planning based on current and projected usage. This
paper applies a number of observations of Web server
usage to create a realistic Web workload generation
tool which mimics a set of real users accessing a
server. The tool, called
Surge (Scalable URL Reference Generator), generates
references matching empirical measurements of (1)
server file size distribution; (2) request size
distribution; (3) relative file popularity; (4)
embedded file references; (5) temporal locality of
reference; and (6) idle periods of individual users.
This paper reviews the essential elements required in
the generation of a representative Web workload. It
also addresses the technical challenges to satisfying
this large set of simultaneous constraints on the
properties of the reference stream, the solutions we
adopted, and their associated accuracy. Finally, we
present evidence that Surge exercises servers in a
manner significantly different from other Web server
benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ji:1998:PMM,
author = "Minwen Ji and Edward W. Felten and Kai Li",
title = "Performance measurements for multithreaded programs",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "161--170",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multithreaded programming is an effective way to
exploit concurrency, but it is difficult to debug and
tune a highly threaded program. This paper describes a
performance tool called Tmon for monitoring, analyzing
and tuning the performance of multithreaded programs.
The performance tool has two novel features: it uses
`thread waiting time' as a measure and constructs
thread waiting graphs to show thread dependencies and
thus performance bottlenecks, and it identifies
`semi-busy-waiting' points where CPU cycles are wasted
in condition checking and context switching. We have
implemented the Tmon tool and, as a case study, we have
used it to measure and tune a heavily threaded file
system. We used four workloads to tune different
aspects of the file system. We were able to improve the
file system bandwidth and throughput significantly. In
one case, we were able to improve the bandwidth by two
orders of magnitude.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:1998:MES,
author = "Dongming Jiang and Jaswinder Pal Singh",
title = "A methodology and an evaluation of the {SGI Origin
2000}",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "171--181",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As hardware-coherent, distributed shared memory (DSM)
multiprocessing becomes popular commercially, it is
important to evaluate modern realizations to understand
how they perform and scale for a range of interesting
applications and to identify the nature of the key
bottlenecks. This paper evaluates the SGI Origin
2000---the machine that perhaps has the most aggressive
communication architecture of the recent cache-coherent
offerings---and, in doing so, articulates a sound
methodology for evaluating real systems. We examine
data access and synchronization microbenchmarks;
speedups for different application classes, problem
sizes and scaling models; detailed interactions and
time breakdowns using performance tools; and the impact
of special hardware support. We find that overall the
Origin appears to deliver on the promise of
cache-coherent shared address space multiprocessing, at
least at the 32-processor scale we examine. The machine
is quite easy to program for performance and has fewer
organizational problems than previous systems we have
examined. However, some important trouble spots are
also identified, especially related to contention that
is apparently caused by engineering decisions to share
resources among processors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shriver:1998:ABM,
author = "Elizabeth Shriver and Arif Merchant and John Wilkes",
title = "An analytic behavior model for disk drives with
readahead caches and request reordering",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "182--191",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern disk drives read-ahead data and reorder
incoming requests in a workload-dependent fashion. This
improves their performance, but makes simple analytical
models of them inadequate for performance prediction,
capacity planning, workload balancing, and so on. To
address this problem we have developed a new analytic
model for disk drives that do readahead and request
reordering. We did so by developing performance models
of the disk drive components (queues, caches, and the
disk mechanism) and a workload transformation technique
for composing them. Our model includes the effects of
workload-specific parameters such as request size and
spatial locality. The result is capable of predicting
the behavior of a variety of real-world devices to
within 17\% across a variety of workloads and disk
drives.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fraguela:1998:MSA,
author = "Basilio B. Fraguela and Ram{\'o}n Doallo and Emilio L.
Zapata",
title = "Modeling set associative caches behavior for irregular
computations",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "192--201",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While much work has been devoted to the study of cache
behavior during the execution of codes with regular
access patterns, little attention has been paid to
irregular codes. An important portion of these codes
are scientific applications that handle compressed
sparse matrices. In this work a probabilistic model for
the prediction of the number of misses on a $K$-way
associative cache memory considering sparse matrices
with a uniform or banded distribution is presented. Two
different irregular kernels are considered: the sparse
matrix-vector product and the transposition of a sparse
matrix. The model was validated with simulations on
synthetic uniform matrices and banded matrices from the
Harwell-Boeing collection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache performance; irregular computation;
probabilistic model; sparse matrix",
}
@Article{Jiang:1998:IRF,
author = "Tianji Jiang and Mostafa H. Ammar and Ellen W.
Zegura",
title = "Inter-receiver fairness: a novel performance measure
for multicast {ABR} sessions",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "202--211",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a multicast ABR service, a connection is typically
restricted to the rate allowed on the bottleneck link
in the distribution tree from the source to the set of
receivers. Because of this, receivers in the connection
can experience {\em inter-receiver unfairness}, when
the preferred operating rates of the receivers are
different. In this paper we explore the issue of
improving the inter-receiver fairness in a multicast
ABR connection by allowing the connection to operate at
a rate higher than what is allowed by the multicast
tree's bottleneck link. Since this can result in cell
loss to some receivers, we operate with the knowledge
of each receiver's application-specific loss tolerance.
The multicast connection rate is not allowed to
increase beyond the point where the cell loss on a path
to a receiver exceeds this receiver's loss tolerance.
Based on these ideas we develop an inter-receiver
fairness measure and a technique for determining the
rate that maximizes this measure. We show possible
switch algorithms that can be used to convey the
parameters needed to compute the function to the
connection's source. In addition we develop a global
network measure that helps us assess the effect of
increasing inter-receiver fairness on the total network
delivered throughput. We also briefly explore improving
inter-receiver fairness through the use of multiple
virtual circuits to carry traffic for a single
multicast session. A set of examples demonstrate the
use of the inter-receiver fairness concept in various
network scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Courcoubetis:1998:AEL,
author = "Costas Courcoubetis and Vasilios A. Siris and George
D. Stamoulis",
title = "Application and evaluation of large deviation
techniques for traffic engineering in broadband
networks",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "212--221",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Accurate yet simple methods for traffic engineering
are important for efficient dimensioning of broadband
networks. The goal of this paper is to apply and
evaluate large deviation techniques for traffic
engineering. In particular, we employ the recently
developed theory of {\em effective bandwidths}, where
the effective bandwidth depends not only on the
statistical characteristics of the traffic stream, but
also on a link's operating point through two
parameters, the {\em space\/} and {\em time\/}
parameters, which are computed using the {\em many
sources asymptotic}. We show that this effective
bandwidth definition can accurately quantify resource
usage. Furthermore, we estimate and interpret values of
the space and time parameters for various mixes of real
traffic demonstrating how these values can be used to
clarify the effects on the link performance of the time
scales of burstiness of the traffic input, of the link
parameters (capacity and buffer), and of traffic
control mechanisms, such as traffic shaping. Our
approach relies on off-line analysis of traffic traces,
the granularity of which is determined by the time
parameter of the link, and our experiments involve a
large set of MPEG-1 compressed video and Internet Wide
Area Network (WAN) traces, as well as modeled voice
traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ATM; broadband networks; effective bandwidths; large
deviations; traffic engineering",
}
@Article{Neidhardt:1998:CRT,
author = "Arnold L. Neidhardt and Jonathan L. Wang",
title = "The concept of relevant time scales and its
application to queuing analysis of self-similar traffic
(or is {Hurst} naughty or nice?)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "222--232",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277923",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent traffic analyses from various packet networks
have shown the existence of long-range dependence in
bursty traffic. In evaluating its impact on queuing
performance, earlier investigations have noted how the
presence of long-range dependence, or a high value of
the Hurst parameter $H$, is often associated with
surprisingly large queue sizes. As a result, a common
impression has been created of expecting queuing
performance to be worse as $H$ increases, but this
impression can be misleading. In fact, there are
examples in which larger values of $H$ are associated
with smaller queues. So the question is how can one
tell whether queuing performance would improve or
degrade as $H$ rises? In this paper, we show that the
relative queuing performance can be assessed by
identifying a couple of time scales. First, in
comparing a high-$H$ process with a low-$H$ process,
there is a unique time scale $ t_m$ at which the
variances of the two processes match (assuming exact,
second-order self similarity for both processes).
Second, there are time scales $ t_{qi}$ that are most
relevant for queuing the arrivals of process $i$. If
both of the queuing scales $ t_{qi}$ exceed the
variance-matching scale $ t_m$, then the high-$H$ queue
is worse; if the queuing scales are smaller, then the
low-$H$ queue is worse. However, no firm prediction can
be made in the remaining case of $ t_m$ falling between
the two queuing scales. Numerical examples are given to
demonstrate our results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arpaci-Dusseau:1998:SII,
author = "Andrea C. Arpaci-Dusseau and David E. Culler and Alan
M. Mainwaring",
title = "Scheduling with implicit information in distributed
systems",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "233--243",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em Implicit coscheduling\/} is a distributed
algorithm for time-sharing communicating processes in a
cluster of workstations. By observing and reacting to
implicit information, local schedulers in the system
make independent decisions that dynamically coordinate
the scheduling of communicating processes. The
principal mechanism involved is {\em two-phase
spin-blocking\/}: a process waiting for a message
response spins for some amount of time, and then
relinquishes the processor if the response does not
arrive. In this paper, we describe our experience
implementing implicit coscheduling on a cluster of 16
UltraSPARC I workstations; this has led to
contributions in three main areas. First, we more
rigorously analyze the two-phase spin-block algorithm
and show that spin time should be increased when a
process is receiving messages. Second, we present
performance measurements for a wide range of synthetic
benchmarks and for seven Split-C parallel applications.
Finally, we show how implicit coscheduling behaves
under different job layouts and scaling, and discuss
preliminary results for achieving fairness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:1998:SPS,
author = "Thu D. Nguyen and John Zahorjan",
title = "Scheduling policies to support distributed {$3$D}
multimedia applications",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "244--253",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277930",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of scheduling the rendering
component of 3D multimedia applications on a cluster of
workstations connected via a local area network. Our
goal is to meet a periodic real-time constraint. In
abstract terms, the problem we address is how best to
schedule tasks with unpredictable service times on
distinct processing nodes so as to meet a real-time
deadline, given that all communication among nodes
entails some (possibly large) overhead. We consider two
distinct classes of schemes, {\em static}, in which
task reallocations are scheduled to occur at specific
times, and {\em dynamic}, in which reallocations are
triggered by some processor going idle. For both
classes we further examine both {\em global\/}
reassignments, in which all nodes are rescheduled at a
rescheduling moment, and {\em local\/} reassignments,
in which only a subset of the nodes engage in
rescheduling at any one time. We show that global
dynamic policies work best over a range of
parameterizations appropriate to such systems. We
introduce a new policy, Dynamic with Shadowing, that
places a small number of tasks in the schedules of
multiple workstations to reduce the amount of
communication required to complete the schedule. This
policy is shown to dominate the other alternatives
considered over most of the parameter space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moritz:1998:LMN,
author = "Csaba Andras Moritz and Matthew I. Frank",
title = "{LoGPC}: modeling network contention in
message-passing programs",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "254--263",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277933",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many real applications, for example those with
frequent and irregular communication patterns or those
using large messages, network contention and contention
for message processing resources can be a significant
part of the total execution time. This paper presents a
new cost model, called LoGPC, that extends the LogP [9]
and LogGP [4] models to account for the impact of
network contention and network interface DMA behavior
on the performance of message-passing programs. We
validate LoGPC by analyzing three applications
implemented with Active Messages [11, 18] on the MIT
Alewife multiprocessor. Our analysis shows that network
contention accounts for up to 50\% of the total
execution time. In addition, we show that the impact of
communication locality on the communication costs is at
most a factor of two on Alewife. Finally, we use the
model to identify tradeoffs between synchronous and
asynchronous message passing styles.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barve:1998:MOT,
author = "Rakesh Barve and Elizabeth Shriver and Phillip B.
Gibbons and Bruce K. Hillyer and Yossi Matias and
Jeffrey Scott Vitter",
title = "Modeling and optimizing {I/O} throughput of multiple
disks on a bus (summary)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "264--265",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277936",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For a wide variety of computational tasks, disk I/O
continues to be a serious obstacle to high performance.
The focus of the present paper is on systems that use
multiple disks per SCSI bus. We measured the
performance of concurrent random I/Os, and observed
bus-related phenomena that impair performance. We
describe these phenomena, and present a new I/O
performance model that accurately predicts the average
bandwidth achieved by a heavy workload of random reads
from disks on a SCSI bus. This model, although
relatively simple, predicts performance on several
platforms to within 12\% for I/O sizes in the range
16--128 KB. We describe a technique to improve the I/O
bandwidth by 10--20\% for random-access workloads that
have large I/Os and high concurrency. This technique
increases the percentage of disk head positioning time
that is overlapped with data transfers, and increases
the percentage of transfers that occur at bus
bandwidth, rather than at disk-head bandwidth.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blumofe:1998:PWS,
author = "Robert D. Blumofe and Dionisios Papadopoulos",
title = "The performance of work stealing in multiprogrammed
environments (extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "266--267",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277939",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crovella:1998:TAD,
author = "Mark E. Crovella and Mor Harchol-Balter and Cristina
D. Murta",
title = "Task assignment in a distributed system (extended
abstract): improving performance by unbalancing load",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "268--269",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277942",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of task assignment in a
distributed system (such as a distributed Web server)
in which task sizes are drawn from a heavy-tailed
distribution. Many task assignment algorithms are based
on the heuristic that balancing the load at the server
hosts will result in optimal performance. We show this
conventional wisdom is less true when the task size
distribution is heavy-tailed (as is the case for Web
file sizes). We introduce a new task assignment policy,
called Size Interval Task Assignment with Variable Load
(SITA-V). SITA-V purposely operates the server hosts at
different loads, and directs smaller tasks to the
lighter-loaded hosts. The result is that SITA-V
provably decreases the mean task slowdown by
significant factors (up to 1000 or more) where the more
heavy-tailed the workload, the greater the improvement
factor. We evaluate the tradeoff between improvement in
slowdown and increase in waiting time in a system using
SITA-V, and show conditions under which SITA-V
represents a particularly appealing policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Manley:1998:SSS,
author = "Stephen Manley and Margo Seltzer and Michael Courage",
title = "A self-scaling and self-configuring benchmark for
{Web} servers (extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "270--271",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277945",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "World Wide Web clients and servers have become some of
the most important applications in our computing base,
and we need realistic and meaningful ways of measuring
their performance. Current server benchmarks do not
capture the wide variation that we see in servers and
are not accurate in their characterization of web
traffic. In this paper, we present a self-configuring,
scalable benchmark that generates a server benchmark
load based on actual server loads. In contrast to other
web benchmarks, our benchmark focuses on request
latency instead of focusing exclusively on throughput
sensitive metrics. We present our new benchmark
hBench:Web, and demonstrate how it accurately models
the load of an actual server. The benchmark can also be
used to assess how continued growth or changes in the
workload will affect future performance. Using existing
log histories, we show that these predictions are
sufficiently realistic to provide insight into
tomorrow's Web performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmark; CGI; scaling; self-configuring; World Wide
Web",
}
@Article{Rousskov:1998:PCP,
author = "Alex Rousskov and Valery Soloviev",
title = "On performance of caching proxies (extended
abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "272--273",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277946",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Waldby:1998:TAE,
author = "J. Waldby and U. Madhow and T. V. Lakshman",
title = "Total acknowledgements (extended abstract): a robust
feedback mechanism for end-to-end congestion control",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "274--275",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277947",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "End-to-end data transport protocols have two main
functions: error recovery and congestion control. The
information required by the sender to perform these
functions is provided by acknowledgements (ACKs) from
the receiver. The Internet transport protocol, TCP/IP,
uses cumulative acknowledgements (CACKs), which provide
a robust but minimal mechanism for error recovery which
is inadequate for heterogeneous networks with random
loss. Furthermore, TCP's congestion control mechanism
is based on counting ACKs, and is therefore vulnerable
to loss of ACKs on the reverse path, particularly when
the latter may be slower than the forward path, as in
asymmetric networks. The contributions of this paper
are as follows: (a) We show that a simple enhancement of
CACK provides sufficient information for end-to-end
{\em congestion control}. We term this ACK format total
ACKs (TACKs). (b) We devise a novel ACK format that uses
TACKs for congestion control, and negative ACKs (NACKs)
for efficient error recovery. Typically, the main
concern with NACKs is that of robustness to ACK loss,
and we address this using an implementation that
provides enough redundancy to provide such
robustness. (c) We use the TACK+NACK acknowledgement
format as the basis for a new transport protocol that
provides efficient error recovery and dynamic
congestion control. The protocol provides large
performance gains over TCP in an environment with
random loss, and is robust against loss of ACKs in the
reverse path. In particular, the protocol gives high
throughput up to a designed level of random loss,
independent of the bandwidth-delay product. This is in
contrast to TCP, whose throughput deteriorates
drastically if the random loss probability is higher
than the inverse square of the bandwidth-delay
product.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Willis:1998:PCR,
author = "Thomas E. Willis and George B. {Adams III}",
title = "Portable, continuous recording of complete computer
behavior with low overhead (extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "276--277",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277948",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Acharya:1998:UIM,
author = "Anurag Acharya and Sanjeev Setia",
title = "Using idle memory for data-intensive computations
(extended abstract)",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "278--279",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277858.277949",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aboutabl:1998:TDD,
author = "Mohamed Aboutabl and Ashok Agrawala and Jean-Dominique
Decotignie",
title = "Temporally determinate disk access (extended
abstract): an experimental approach",
journal = j-SIGMETRICS,
volume = "26",
number = "1",
pages = "280--281",
month = jun,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/277851.277950",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:25:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marsan:1998:MGS,
author = "M. {Ajmone Marsan} and G. Balbo and G. Conte and S.
Donatelli and G. Franceschinis",
title = "Modelling with {Generalized Stochastic Petri Nets}",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "2--2",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.581193",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bause:1998:SPN,
author = "Falko Bause and Pieter S. Kritzinger",
title = "Stochastic {Petri} Nets: An Introduction to the
Theory",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "2--3",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.581194",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lindemann:1998:PMD,
author = "Christoph Lindemann",
title = "Performance Modelling with Deterministic and
Stochastic {Petri} Nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "3--3",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.581195",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lindemann:1998:SIS,
author = "Christoph Lindemann",
title = "Special issue on stochastic {Petri} nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "4--4",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288201",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buchholz:1998:GHG,
author = "Peter Buchholz and Peter Kemper",
title = "On generating a hierarchy for {GSPN} analysis",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "5--14",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288202",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines the (semi) automatic generation of
a hierarchical structure for generalized stochastic
Petri nets (GSPNs). The idea is to partition a GSPN
automatically into a set of components with
asynchronous communication. Net level results obtained
by invariant computation for these subnets are used to
define a macro description of the internal state. This
yields a hierarchical structure which is exploited in
several efficient analysis algorithms. These algorithms
include reachability set/graph generation, structured
numerical analysis techniques and approximation
techniques based on decomposition and aggregation. A
GSPN model of an existing production cell and its
digital control is analyzed to demonstrate usefulness
of the approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "generalized stochastic Petri nets; hierarchical
structure; Kronecker algebra; Markov chain analysis
techniques",
}
@Article{Fricks:1998:ANM,
author = "Ricardo M. Fricks and Antonio Puliafito and Mikl{\'o}s
Telek and Kishor S. Trivedi",
title = "Applications of non-{Markovian} stochastic {Petri}
nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "15--27",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288204",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Petri nets represent a powerful paradigm for modeling
parallel and distributed systems. Parallelism and
resource contention can easily be captured and time can
be included for the analysis of system dynamic
behavior. Most popular stochastic Petri nets assume
that all firing times are exponentially distributed.
This is found to be a severe limitation in many
circumstances that require deterministic and generally
distributed firing times. This has led to a
considerable interest in studying non-Markovian models.
In this paper we specifically focus on non-Markovian
Petri nets. The analytical approach through the
solution of the underlying Markov regenerative process
is dealt with and numerical analysis techniques are
discussed. Several examples are presented and solved to
highlight the potentiality of the proposed
approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Markov regenerative processes; numerical analysis;
preemption policies; stochastic Petri nets",
}
@Article{Marsan:1998:MAS,
author = "Marco {Ajmone Marsan} and Rossano Gaeta",
title = "Modeling {ATM} systems with {GSPNs} and {SWNs}",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "28--37",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288208",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper overviews the work of the authors in the
field of modeling and analysis of Asynchronous Transfer
Mode (ATM) networks using Generalized Stochastic Petri
Nets (GSPN) and a special class of high-level
stochastic Petri nets known as Stochastic Well-formed
Nets (SWN). These formalisms are first shown to be
adequate tools for the development of models of ATM
systems, provided that only one timed transition is
used, together with many immediate transitions. The
only timed transition in the GSPN and SWN models
represents the ATM system's cell time, while immediate
transitions implement the ATM system's behavior. The
firing time distribution of the only timed transition
is irrelevant for the computation of several
interesting performance indices. The results, as well
as the problems, derived from the analysis of ATM
switches and Local Area Networks (LAN) that adopt the
Available Bit Rate (ABR) service category are
summarized and discussed, providing references to the
works containing the technical details.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ABR; ATM; Gauss switch; GSPN; knockout switch; LAN;
SWN",
}
@Article{Ost:1998:AWM,
author = "Alexander Ost and Boudewijn R. Haverkort",
title = "Analysis of windowing mechanisms with infinite-state
stochastic {Petri} nets",
journal = j-SIGMETRICS,
volume = "26",
number = "2",
pages = "38--46",
month = aug,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/288197.288212",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:06 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a performance evaluation of
windowing mechanisms in world-wide web applications.
Previously, such mechanisms have been studied by means
of measurements only, however, given suitable tool
support, we show that such evaluations can also be
performed conveniently using infinite-state stochastic
Petri nets. We briefly present this class of stochastic
Petri nets as well as the approach for solving the
underlying infinite-state Markov chain using
matrix-geometric methods. We then present a model of
the TCP slow-start congestion avoidance mechanism,
subject to a (recently published) typical worldwide web
workload. The model is parameterized using measurement
data for a national connection and an overseas
connection. Our study shows how the maximum congestion
window size, the connection release timeout and the
packet loss probability influence the expected number
of buffered segments at the server, the connection
setup rate and the connection time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; matrix-geometric methods;
stochastic Petri nets; window flow control",
}
@Article{Dujmovic:1998:EES,
author = "Jozo J. Dujmovi{\'c} and Ivo Dujmovi{\'c}",
title = "Evolution and evaluation of {SPEC} benchmarks",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "2--9",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a method for quantitative evaluation of
SPEC benchmarks. The method is used for the analysis of
three generations of SPEC component-level benchmarks:
SPEC89, SPEC92, and SPEC95. Our approach is suitable
for studying (1) the redundancy between individual
benchmark programs, (2) the size, completeness, density
and granularity of benchmark suites, (3) the
distribution of benchmark programs in a program space,
and (4) benchmark suite design and evolution
strategies. The presented method can be used for
designing a universal benchmark suite as the next
generation of SPEC benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cao:1998:GEI,
author = "Pei Cao and Sekhar Sarukkai",
title = "{Guest Editors}' Introduction",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "10--10",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.581196",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Caceres:1998:WPC,
author = "Ram{\'o}n C{\'a}ceres and Fred Douglis and Anja
Feldmann and Gideon Glass and Michael Rabinovich",
title = "{Web} proxy caching: the devil is in the details",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "11--15",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Much work in the analysis of proxy caching has focused
on high-level metrics such as hit rates, and has
approximated actual reference patterns by ignoring
exceptional cases such as connection aborts. Several of
these low-level details have a strong impact on
performance, particularly in heterogeneous bandwidth
environments such as modem pools connected to faster
networks. Trace-driven simulation of the modem pool of
a large ISP suggests that `cookies' dramatically affect
the cachability of resources; wasted bandwidth due to
aborted connections can more than offset the savings
from cached documents; and using a proxy to keep from
repeatedly opening new TCP connections can reduce
latency more than simply caching data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnamurthy:1998:PQE,
author = "Diwakar Krishnamurthy and Jerome Rolia",
title = "Predicting the {QoS} of an electronic commerce server:
those mean percentiles",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "16--22",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a case study on Quality of Service
(QoS) measures and Service Level Agreements (SLA) for
an electronic commerce server. Electronic commerce
systems typically rely on a combination of an HTTP
server and a database server that may be integrated
with other enterprise information resources. Some
interactions with these systems cause requests for
static HTML pages. Others cause significant amounts of
database processing. Response time percentiles are
well-accepted measures of QoS for such requests. In
this paper we measure the behavior of an electronic
commerce server under several controlled loads and
study response time measures for several workload
abstractions. Response time measures are captured for
individual URLs, groups of functionally related URLs,
and for sequences of URLs. We consider the utility of
these workload abstractions for providing SLA. We also
show that empirical evidence of server behavior in
conjunction with analytic modeling techniques may be
useful to predict the 90-percentile of response times
for sequence based workload classes. The model
predictions could be used to support realtime call
admission algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bangs:1998:BOS,
 author =       "Gaurav Banga and Peter Druschel and Jeffrey C. Mogul",
title = "Better operating system features for faster network
servers",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "23--30",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306234",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Widely-used operating systems provide inadequate
support for large-scale Internet server applications.
Their algorithms and interfaces fail to efficiently
support either event-driven or multi-threaded servers.
They provide poor control over the scheduling and
management of machine resources, making it difficult to
provide robust and controlled service. We propose new
UNIX interfaces to improve scalability, and to provide
fine-grained scheduling and resource management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mosberger:1998:HTM,
author = "David Mosberger and Tai Jin",
title = "{\tt httperf} --- a tool for measuring {Web} server
performance",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "31--37",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306235",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes httperf, a tool for measuring web
server performance. It provides a flexible facility for
generating various HTTP workloads and for measuring
server performance. The focus of httperf is not on
implementing one particular benchmark but on providing
a robust, high-performance tool that facilitates the
construction of both micro- and macro-level benchmarks.
The three distinguishing characteristics of httperf are
its robustness, which includes the ability to generate
and sustain server overload, support for the HTTP/1.1
protocol, and its extensibility to new workload
generators and performance measurements. In addition to
reporting on the design and implementation of httperf
this paper also discusses some of the experiences and
insights gained while realizing this tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ward:1998:ISP,
author = "Amy Ward and Peter Glynn and Kathy Richardson",
title = "{Internet} service performance failure detection",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "38--43",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing complexity of computer networks and our
increasing dependence on them means enforcing
reliability requirements is both more challenging and
more critical. The expansion of network services to
include both traditional interconnect services and
user-oriented services such as the web and email has
guaranteed both the increased complexity of networks
and the increased importance of their performance. The
first step toward increasing reliability is early
detection of network performance failures. Here we
consider the applicability of statistical model
frameworks under the most general assumptions possible.
Using measurements from corporate proxy servers, we
test the framework against real world failures. The
results of these experiments show we can detect
failures, but with some tradeoff questions. The pull is
in the warning time: either we miss early warning signs
or we report some false warnings. Finally, we offer
insight into the problem of failure diagnosis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sayal:1998:SAR,
author = "Mehmet Sayal and Yuri Breitbart and Peter Scheuermann
and Radek Vingralek",
title = "Selection algorithms for replicated {Web} servers",
journal = j-SIGMETRICS,
volume = "26",
number = "3",
pages = "44--50",
month = dec,
year = "1998",
CODEN = "????",
DOI = "https://doi.org/10.1145/306225.306238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Replication of documents on geographically distributed
servers can improve both performance and reliability of
the Web service. Server selection algorithms allow Web
clients to select one of the replicated servers which
is `close' to them and thereby minimize the response
time of the Web service. Using client proxy server
traces, we compare the effectiveness of several
`proximity' metrics including the number of hops
between the client and server, the ping round trip time
and the HTTP request latency. Based on this analysis,
we design two new algorithms for selection of
replicated servers and compare their performance
against other existing algorithms. We show that the new
server selection algorithms improve the performance of
other existing algorithms on the average by 55\%. In
addition, the new algorithms improve the performance of
the existing non-replicated Web servers on average by
69\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hillingsworth:1999:SSS,
 author =       "Jeffrey K. Hollingsworth and Barton P. Miller",
title = "Summary of the {SIGMETRICS Symposium on Parallel and
Distributed Processing}",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "2--12",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309749",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sevcik:1999:SIS,
author = "Kenneth C. Sevcik",
title = "Special Issue on Scheduling in Multiprogrammed
Parallel Systems",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "13--13",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.581197",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Downey:1999:EGW,
author = "Allen B. Downey and Dror G. Feitelson",
title = "The elusive goal of workload characterization",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "14--29",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309750",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The study and design of computer systems requires good
models of the workload to which these systems are
subjected. Until recently, the data necessary to build
these models---observations from production
installations---were not available, especially for
parallel computers. Instead, most models were based on
assumptions and mathematical attributes that facilitate
analysis. Recently a number of supercomputer sites have
made accounting data available that make it possible to
build realistic workload models. It is not clear,
however, how to generalize from specific observations
to an abstract model of the workload. This paper
presents observations of workloads from several
parallel supercomputers and discusses modeling issues
that have caused problems for researchers in this
area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Setia:1999:IJM,
author = "Sanjeev Setia and Mark S. Squillante and Vijay K.
Naik",
title = "The impact of job memory requirements on
gang-scheduling performance",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "30--39",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309751",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Almost all previous research on gang-scheduling has
ignored the impact of real job memory requirements on
the performance of the policy. This is despite the fact
that on parallel supercomputers, because of the
problems associated with demand paging, executing jobs
are typically allocated enough memory so that their
{\em entire address space\/} is memory-resident. In
this paper, we examine the impact of job memory
requirements on the performance of gang-scheduling
policies. We first present an analysis of the
memory-usage characteristics of jobs in the production
workload on the Cray T3E at the San Diego Supercomputer
Center. We also characterize the memory usage of some
of the applications that form part of the workload on
the LLNL ASCI supercomputer. Next, we examine the issue
of long-term scheduling on MPPs, i.e., we study
policies for deciding which jobs among a set of
competing jobs should be allocated memory and thus
should be allowed to execute on the processors of the
system. Using trace-driven simulation, we evaluate the
impact of using different long-term scheduling policies
on the overall performance of Distributed Hierarchical
Control (DHC), a gang-scheduling policy that has been
studied extensively in the research literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chan:1999:EPJ,
author = "Yuet-Ning Chan and Sivarama P. Dandamudi and
Shikharesh Majumdar",
title = "Experiences with parallel job scheduling on a
transputer system",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "40--51",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309753",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Both time and space sharing strategies have been
proposed for job scheduling in multiprogrammed parallel
systems. This paper summarizes the major observations
gained from an experimental investigation of these two
partition sharing strategies on a Transputer system. A
number of factors such as the applications and their
software architectures in the multiprogramming mix, the
partition sharing strategy, and the partition size are
varied and the resulting insights into system
performance and scheduling are presented. Space sharing
is observed to produce a superior performance in
comparison to time sharing for a number of
multiprogrammed workloads. Time sharing showed a better
performance for workloads with high variability in
process execution times, and with high rates of
interprocess communication. The relationships between
system performance and a number of workload and system
characteristics are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1999:IJA,
author = "Mark S. Squillante and David D. Yao and Li Zhang",
title = "The impact of job arrival patterns on parallel
scheduling",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "52--59",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309754",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an initial analysis of the
job arrival patterns from a real parallel computing
system and we develop a class of traffic models to
characterize these arrival patterns. Our analysis of
the job arrival data illustrates traffic patterns that
exhibit heavy-tail behavior and other characteristics
which are quite different from the arrival processes
used in previous studies of parallel scheduling. We
then investigate the impact of these arrival traffic
patterns on the performance of parallel space-sharing
scheduling strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dowdy:1999:SIH,
author = "L. W. Dowdy and E. Rosti and G. Serazzi and E.
Smirni",
title = "Scheduling issues in high-performance computing",
journal = j-SIGMETRICS,
volume = "26",
number = "4",
pages = "60--69",
month = mar,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/309746.309756",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:27:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider the problem of scheduling
computational resources across a range of
high-performance systems, from tightly coupled parallel
systems to loosely coupled ones like networks of
workstations and geographically dispersed
meta-computing environments. We review the role of
architecture issues in the choice of scheduling
discipline and we present a selected set of policies
that address different aspects of the scheduling
problem. This discussion serves as the motivation for
addressing the success of academic research in
scheduling as well as its common criticisms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ribeiro:1999:SNL,
author = "Vinay J. Ribeiro and Rudolf H. Riedi and Matthew S.
Crouse and Richard G. Baraniuk",
title = "Simulation of {nonGaussian} long-range-dependent
traffic using wavelets",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "1--12",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301475",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhao:1999:BEC,
author = "Wei Zhao and Satish K. Tripathi",
title = "Bandwidth-efficient continuous media streaming through
optimal multiplexing",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "13--22",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301476",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; bandwidth allocation; feasible
region; multimedia streaming; multiplexing;
quality-of-service; temporal smoothing; transmission
scheduling",
}
@Article{Kumar:1999:ESS,
author = "Sanjeev Kumar and Dongming Jiang and Rohit Chandra and
Jaswinder Pal Singh",
title = "Evaluating synchronization on shared address space
multiprocessors: methodology and performance",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "23--34",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301477",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Acharya:1999:AUI,
author = "Anurag Acharya and Sanjeev Setia",
title = "Availability and utility of idle memory in workstation
clusters",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "35--46",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301478",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kaplan:1999:TRV,
author = "Scott F. Kaplan and Yannis Smaragdakis and Paul R.
Wilson",
title = "Trace reduction for virtual memory simulations",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "47--58",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301479",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Douceur:1999:LSS,
author = "John R. Douceur and William J. Bolosky",
title = "A large-scale study of file-system contents",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "59--70",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; directory hierarchy; file-system
contents; static data snapshot; workload
characterization",
}
@Article{Martin:1999:NSH,
author = "Richard P. Martin and David E. Culler",
title = "{NFS} sensitivity to high performance networks",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "71--82",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barve:1999:MOT,
author = "Rakesh Barve and Elizabeth Shriver and Phillip B.
Gibbons and Bruce K. Hillyer and Yossi Matias and
Jeffrey Scott Vitter",
title = "Modeling and optimizing {I/O} throughput of multiple
disks on a bus",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "83--92",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sethuraman:1999:OSS,
author = "Jay Sethuraman and Mark S. Squillante",
title = "Optimal stochastic scheduling in multiclass parallel
queues",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "93--102",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301483",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varki:1999:MVT,
author = "Elizabeth Varki",
title = "Mean value technique for closed fork-join networks",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "103--112",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Franaszek:1999:MFS,
author = "Peter A. Franaszek and Philip Heidelberger and Michael
Wazlowski",
title = "On management of free space in compressed memory
systems",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "113--121",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301453.301485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Smaragdakis:1999:ESE,
author = "Yannis Smaragdakis and Scott Kaplan and Paul Wilson",
title = "{EELRU}: simple and effective adaptive page
replacement",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "122--133",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:1999:ESP,
author = "Donghee Lee and Jongmoo Choi and Jong-Hun Kim and Sam
H. Noh and Sang Lyul Min and Yookun Cho and Chong Sang
Kim",
title = "On the existence of a spectrum of policies that
subsumes the least recently used ({LRU}) and least
frequently used ({LFU}) policies",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "134--143",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301487",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ludwig:1999:MLT,
author = "Reiner Ludwig and Bela Rathonyi and Almudena Konrad
and Kimberly Oden and Anthony Joseph",
title = "Multi-layer tracing of {TCP} over a reliable wireless
link",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "144--154",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GSM; measurement tools; TCP; wireless",
}
@Article{Anjum:1999:BDT,
author = "Farooq Anjum and Leandros Tassiulas",
title = "On the behavior of different {TCP} algorithms over a
wireless channel with correlated packet losses",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "155--165",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301550",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sripanidkulchai:1999:TPV,
author = "Kunwadee Sripanidkulchai and Andy Myers and Hui
Zhang",
title = "A third-party value-added network service approach to
reliable multicast",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "166--177",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301553",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fan:1999:WPB,
author = "Li Fan and Pei Cao and Wei Lin and Quinn Jacobson",
title = "{Web} prefetching between low-bandwidth clients and
proxies: potential and performance",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "178--187",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barford:1999:PEH,
author = "Paul Barford and Mark Crovella",
title = "A performance evaluation of hyper text transfer
protocols",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "188--197",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:1999:HRM,
author = "Huican Zhu and Ben Smith and Tao Yang",
title = "Hierarchical resource management for {Web} server
clusters with dynamic content",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "198--199",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liao:1999:AGS,
author = "Cheng Liao and Margaret Martonosi and Douglas W.
Clark",
title = "An adaptive globally-synchronizing clock algorithm and
its implementation on a {Myrinet}-based {PC} cluster",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "200--201",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.302127",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chou:1999:PSD,
author = "ChengFu Chou and Leana Golubchik and John C. S. Lui",
title = "A performance study of dynamic replication techniques
in continuous media servers",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "202--203",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dovrolis:1999:RDS,
author = "Constantinos Dovrolis and Dimitrios Stiliadis",
title = "Relative differentiated services in the {Internet}:
issues and mechanisms",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "204--205",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bartels:1999:PLF,
author = "Gretta Bartels and Anna Karlin and Darrell Anderson
and Jeffrey Chase and Henry Levy and Geoffrey Voelker",
title = "Potentials and limitations of fault-based {Markov}
prefetching for virtual memory pages",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "206--207",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crowley:1999:UTS,
author = "Patrick Crowley and Jean-Loup Baer",
title = "On the use of trace sampling for architectural studies
of desktop applications",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "208--209",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhola:1999:WMH,
author = "Sumeer Bhola and Mustaque Ahamad",
title = "Workload modeling for highly interactive
applications",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "210--211",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Venkitaraman:1999:DEC,
author = "Narayanan Venkitaraman and Tae-eun Kim and Kang-Won
Lee",
title = "Design and evaluation of congestion control algorithms
in the future {Internet}",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "212--213",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elnozahy:1999:ATC,
author = "E. N. Elnozahy",
title = "Address trace compression through loop detection and
reduction",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "214--215",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "address traces; compression; control flow analysis;
traces",
}
@Article{Nahum:1999:PIW,
author = "Erich Nahum and Tsipora Barzilai and Dilip Kandlur",
title = "Performance issues in {WWW} servers",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "216--217",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301579",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ng:1999:SBE,
author = "T. S. Eugene Ng and Donpaul C. Stephens and Ion Stoica
and Hui Zhang",
title = "Supporting best-effort traffic with fair service
curve",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "218--219",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Padhye:1999:TFR,
author = "Jitendra Padhye and Jim Kurose and Don Towsley and
Rajeev Koodli",
title = "A {TCP}-friendly rate adjustment protocol for
continuous media flows over best effort networks",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "220--221",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301581",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Downey:1999:UPE,
author = "Allen B. Downey",
title = "Using {\tt pathchar} to estimate {Internet} link
characteristics",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "222--223",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301582",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We evaluate pathchar, a tool that infers the
characteristics of links along an Internet path
(latency, bandwidth, queue delays). Looking at two
example paths, we identify circumstances where {\tt
pathchar} is likely to succeed, and develop techniques
to improve the accuracy of {\tt pathchar}'s estimates
and reduce the time it takes to generate them. The most
successful of these techniques is a form of adaptive
data collection that reduces the number of measurements
{\tt pathchar} needs by more than 90\% for some
links.\par
A full-length version of this paper is available from
\url{http://www.cs.colby.edu/~downey/pathchar}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hershko:1999:STS,
author = "Yuval Hershko and Daniel Segal and Hadas Shachnai",
title = "Self-tuning synchronization mechanisms in network
operating systems",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "224--225",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301583",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bose:1999:PEV,
author = "Pradip Bose",
title = "Performance evaluation and validation of
microprocessors",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "226--227",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301584",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance evaluation; processor design; validation",
}
@Article{Majumdar:1999:CMC,
author = "Shikharesh Majumdar and Dale Streibel and Bruce
Beninger and Brian Carroll and Neveenta Verma and Minru
Liu",
title = "Controlling memory contention on a scalable
multiprocessor-based telephone switch",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "228--229",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301585",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cervetto:1999:MBP,
author = "Eugenio Cervetto",
title = "Model-based performance analysis of an {EDP\slash
ERP}-oriented wide area network",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "230--231",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301586",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "EDP; ERP; performance modeling; performance
prediction; wide-area network",
}
@Article{Ramanathan:1999:VSA,
author = "Srinivas Ramanathan and Edward H. Perry",
title = "The value of a systematic approach to measurement and
analysis: an {ISP} case study",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "232--233",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301587",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Siebert:1999:IPD,
author = "Janet Siebert",
title = "Improving performance of data analysis in data
warehouses: a methodology and case study",
journal = j-SIGMETRICS,
volume = "27",
number = "1",
pages = "234--235",
month = jun,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/301464.301588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:28:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data analysis; data warehouse; performance; synthetic
join; VLDB",
}
@Article{Williamson:1999:SIN,
author = "Carey Williamson",
title = "Special Issue on Network Traffic Measurements and
Workload Characterization",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "2--2",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
xxabstract = "Computer performance analysis, whether it be for
design, selection or improvement, has a large body of
literature to draw upon. It is surprising, however,
that few texts exist on the subject. The purpose of
this paper is to provide a feature analysis of the four
major texts suitable for professional and academic
purposes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer performance evaluation; computer system
selection",
}
@Article{Jerkins:1999:MAI,
author = "Judith L. Jerkins and John Monroe and Jonathan L.
Wang",
title = "A measurement analysis of {Internet} traffic over
frame relay",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "3--14",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
xxabstract = "A workshop on the theory and application of analytical
models to ADP system performance prediction was held on
March 12-13, 1979, at the University of Maryland. The
final agenda of the workshop is included as an
appendix. Six sessions were conducted: (1) theoretical
advances, (2) operational analysis, (3) effectiveness
of analytical modeling techniques, (4) validation, (5)
case studies and applications, and (6) modeling tools.
A summary of each session is presented below. A list of
references is provided for more detailed information.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epsilon:1999:AII,
author = "Raja Epsilon and Jun Ke and Carey Williamson",
title = "Analysis of {ISP IP\slash ATM} network traffic
measurements",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "15--24",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
xxabstract = "The concept of a `working-set' of a program running in
a virtual memory environment is now so familiar that
many of us fail to realize just how little we really
know about what it is, what it means, and what can be
done to make such knowledge actually useful. This
follows, perhaps, from the abstract and apparently
intangible facade that tends to obscure the meaning of
working set. What we cannot measure often ranks high in
curiosity value, but ranks low in pragmatic utility.
Where we have measures, as in the page-seconds of
SMF/MVS, the situation becomes even more curious: here
a single number purports to tell us something about the
working set of a program, and maybe something about the
working sets of other concurrent programs, but not very
much about either. This paper describes a case in which
the concept of the elusive working set has been
encountered in practice, has been intensively analyzed,
and finally, has been confronted in its own realm. It
has been trapped, wrapped, and, at last, forced to
reveal itself for what it really is. It is not a
number! Yet it can be measured. And what it is,
together with its measures, turns out to be something
not only high in curiosity value, but also something
very useful as a means to predict the page faulting
behavior of a program running in a relatively complex
multiprogrammed environment. The information presented
here relates to experience gained during the conversion
of a discrete event simulation model to a hybrid model
which employs analytical techniques to forecast the
duration of `steady-state' intervals between mix-change
events in the simulation of a network-scheduled job
stream processing on a 370/168-3AP under MVS. The
specific `encounter' with the concept of working sets
came about when an analytical treatment of program
paging was incorporated into the model. As a result of
considerable luck, ingenuity, and brute-force
empiricism, the model won. Several examples of
empirically derived characteristic working set
functions, together with typical model results, are
supported with a discussion of relevant modeling
techniques and areas of application.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:1999:WCW,
author = "Martin Arlitt and Rich Friedrich and Tai Jin",
title = "Workload characterization of a {Web} proxy in a cable
modem environment",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "25--36",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
xxabstract = "This paper discussed the problems encountered and
techniques used in conducting the performance
evaluation of a multi-processor on-line manpower data
collection system. The two main problems were: (1) a
total lack of available software tools, and (2) many
commonly used hardware monitor measures (e.g., CPU
busy, disk seek in progress) were either meaningless or
not available. The main technique used to circumvent
these problems was detailed analysis of one-word
resolution memory maps. Some additional data collection
techniques were (1) time-stamped channel measurements
used to derive some system component utilization
characteristics and (2) manual stopwatch timings used
to identify the system's terminal response times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barford:1999:MWP,
author = "Paul Barford and Mark Crovella",
title = "Measuring {Web} performance in the wide area",
journal = j-SIGMETRICS,
volume = "27",
number = "2",
pages = "37--48",
month = sep,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/1041864.1041869",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:09 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
xxabstract = "The current status of an implementation of a
methodology relating load, capacity and service for IBM
MVS computer systems is presented. This methodology
encompasses systems whose workloads include batch, time
sharing and transaction processing. The implementation
includes workload classification, mix representation
and analysis, automatic benchmarking, and exhaust point
forecasting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1999:SIW,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA '99)}",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "2--2",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:1999:IPP,
author = "E. G. {Coffman, Jr.} and Ph. Robert and A. L.
Stolyar",
title = "The interval packing process of linear networks",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "3--4",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Caceres:1999:SII,
author = "R. C{\'a}ceres and N. G. Duffield and J. Horowitz and
F. Lo Presti and D. Towsley",
title = "Statistical inference of internal network loss and
topology",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "5--6",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340293",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of inference from end-to-end multicast
measurements has recently been proposed to find the
internal characteristics in a network. Here we describe
statistically rigorous methods for inferring link loss
rates, and their application to identifying the
underlying multicast topology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Epema:1999:PSS,
author = "D. H. J. Epema and J. F. C. M. de Jongh",
title = "Proportional-share scheduling in single-server and
multiple-server computing systems",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "7--10",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340295",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Proportional Share Scheduling (PSS), which is the
allocation of prespecified fractions of a certain
resource to different classes of customers, has been
studied both in the context of the allocation of
network bandwidth and of processors. Much of this work
has focused on systems with a single scheduler and when
all classes of customers are constantly backlogged. We
study the objectives and performance of PSS policies
for processor scheduling when these conditions do not
hold.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bertsimas:1999:PAM,
author = "Dimitris Bertsimas and David Gamarnik and John N.
Tsitsiklis",
title = "Performance analysis of multiclass queueing networks",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "11--14",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340299",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The subject of this abstract is performance analysis
of multiclass queueing networks. The objective is to
estimate steady-state queue lengths in queueing
networks, assuming a priori that the scheduling policy
implemented brings the system to a steady state, namely
is stable. We propose a very general methodology based
on Lyapunov functions, for the performance analysis of
infinite state Markov chains and apply it specifically
to multiclass exponential type queueing networks. We
use, in particular, linear and piece-wise linear
Lyapunov function to establish certain geometric type
lower and upper bounds on the tail probabilities and
bounds on expectation of the queue lengths. The results
proposed in this paper are the first that establish
geometric type upper and lower bounds on tail
probabilities of queue lengths, for networks of such
generality. The previous results on performance
analysis can in general achieve only numerical bounds
and only on expectation and not the distribution of
queue lengths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Herzog:1999:PAG,
author = "Ulrich Herzog",
title = "Process algebras are getting mature for performance
evaluation?!",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "15--18",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340303",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Designing hardware/software systems in the traditional
way we clearly separate methods for the functional
design and performance evaluation. Beside many merits
the well known insularity-problem is one of the
consequences. Therefore, in system engineering we see a
clear trend towards an integral treatment of both
aspects. We briefly summarize research results obtained
during the last decade by embedding stochastic
processes into process algebras, an advanced concept
for the design of parallel and distributed systems. The
central objective of these Stochastic Process Algebras
is the modular and hierarchical modelling and analysis
of complex systems. A general introduction and related
references from different research groups may be found
in [1, 2].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gyorfi:1999:DFC,
author = "L{\'a}szl{\'o} Gy{\"o}rfi and Andr{\'a}s R{\'a}cz and Ken Duffy and John
T. Lewis and Raymond Russell and Fergal Toomey",
title = "Distribution-free confidence intervals for measurement
of effective bandwidths",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "19--19",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340304",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Juneja:1999:SHT,
author = "Sandeep Juneja and Perwez Shahabuddin",
title = "Simulating heavy tailed processes using delayed hazard
rate twisting (extended abstract)",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "20--22",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:1999:SBQ,
author = "Zhen Liu and Don Towsley",
title = "Stochastic bounds for queueing systems with multiple
{Markov} modulated sources",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "23--23",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:1999:WTM,
author = "Mark S. Squillante and David D. Yao and Li Zhang",
title = "{Web} traffic modeling and {Web} server performance
analysis",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "24--27",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bradford:1999:ESH,
author = "Jeffrey P. Bradford and Russell Quong",
title = "An empirical study on how program layout affects cache
miss rates",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "28--42",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340326",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cache miss rates are quoted for a specific program,
cache configuration, and input set; the effect of
program layout on the miss rate has largely been
ignored. This paper examines the miss variation, that
is, the variation in the miss rate for instruction and
data caches resulting from randomly generated layouts;
the layouts were generated by changing the order of the
modules on the command line when linking. This analysis
is performed for several cache sizes, lines sizes,
set-associativities, input sets, compiler versions, and
optimization levels for five programs in the SPEC92
benchmark suite. Miss rates were observed that varied
from 60\% to 180\% of the mean miss rate. We did not
observe any consistently good layouts across different
parameters; in contrast, several layouts were
consistently bad. Overall, cache line size and input
set has little effect on the miss variation, while
increasing the cache size (i.e. decreasing the miss
rate), decreasing the set-associativity, or increasing
the optimization level increases the miss variation.
For a direct-mapped cache, the results in this paper
call into question the validity of using a single
layout (i) to determine the miss rate of a given
program, (ii) to determine how a given compiler
optimization affects the miss rate, and (iii) to make
architecture design decisions based on the miss rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Moore:1999:ECE,
author = "Andrew Moore and Simon Crosby",
title = "An experimental configuration for the evaluation of
{CAC} algorithms",
journal = j-SIGMETRICS,
volume = "27",
number = "3",
pages = "43--54",
month = dec,
year = "1999",
CODEN = "????",
DOI = "https://doi.org/10.1145/340242.340327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:10 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Interest in Connection Admission Control (CAC)
algorithms stems from the need for a network user and a
network provider to forge an agreement on the Quality
of Service (QoS) for a new network connection.
Traditional evaluation of CAC algorithms has been
through simulation studies. We present an alternative
approach: an evaluation environment for CAC algorithms
that is based around an experimental test-rig. This
paper presents the architecture of the test-rig and an
evaluation of its performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2000:ECM,
author = "Martin Arlitt and Ludmila Cherkasova and John Dilley
and Rich Friedrich and Tai Jin",
title = "Evaluating content management techniques for {Web}
proxy caches",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "3--11",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346003",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The continued growth of the World-Wide Web and the
emergence of new end-user technologies such as cable
modems necessitate the use of proxy caches to reduce
latency, network traffic and Web server loads. Current
Web proxy caches utilize simple replacement policies to
determine which files to retain in the cache. We
utilize a trace of client requests to a busy Web proxy
in an ISP environment to evaluate the performance of
several existing replacement policies and of two new,
parameterless replacement policies that we introduce in
this paper. Finally, we introduce Virtual Caches, an
approach for improving the performance of the cache for
multiple metrics simultaneously.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Conti:2000:LDA,
author = "Marco Conti and Enrico Gregori and Fabio Panzieri",
title = "Load distribution among replicated {Web} servers:
{QoS}-based approach",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "12--19",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346004",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A dominant factor for the success of an Internet based
Web service is the Quality of Service (QoS) perceived
by its users. The principal QoS attributes these users
perceive include those related to the service
`responsiveness', i.e. the service availability and
timeliness. In this paper, we argue that QoS can be
provided by distributing the processing load among
replicated Web servers, and that these servers can be
geographically distributed across the Internet. In this
context, we discuss strategies for load distribution,
and summarize a number of alternative architectures
that can implement those strategies. The principal
figure of merit we use in order to assess the
effectiveness of the load distribution strategies we
discuss is the response time experienced by the
users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "load distribution; QoS; Web server",
}
@Article{Griwodz:2000:TLP,
author = "Carsten Griwodz and Michael Liepert and Michael Zink
and Ralf Steinmetz",
title = "Tune to {Lambda} patching",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "20--26",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346006",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A recent paper by Hua, Cai and Sheu [7] describes {\em
Patching\/} as a technique for reducing server load in
a true video-on-demand (TVoD) system. It is a scheme
for multicast video transmissions, which outperforms
techniques such as Batching in response time and
Piggybacking in bandwidth savings for titles of medium
popularity, and probably in user satisfaction as well.
It achieves TVoD performance by buffering part of the
requested video in the receiving end-system. In a
further study, the authors give analytical and
simulation details on optimized patching windows under
the assumptions of the Grace and Greedy patching
techniques. In our view, this does not exploit fully
the calculation that was performed in that study. We
state that temporal distance between two multicast
streams for one movie should not be determined by a
client policy or simulation. Rather, it can be
calculated by the server on a per video basis, since
the server is aware of the average request interarrival
time for each video. Since we model the request
arrivals as a Poisson process, which is defined by a
single variable that is historically called $ \lambda
$, we call this variation `$ \lambda $ Patching'.
Furthermore, we present an optimization option
`Multistream Patching' that reduces the server load
further. We accept that some near video-on-demand-like
traffic is generated with additional patch streams, and
achieve additional gains in server load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive; multicast; streaming server; video on
demand",
}
@Article{Menasec:2000:RMP,
author = "Daniel A. Menasc{\'e} and Rodrigo Fonseca and Virgilio
A. F. Almeida and Marco A. Mendes",
title = "Resource management policies for e-commerce servers",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "27--35",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346009",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Quality of service of e-commerce sites has been
usually managed by the allocation of resources such as
processors, disks, and network bandwidth, and by
tracking conventional performance metrics such as
response time, throughput, and availability. However,
the metrics that are of utmost importance to the
management of a Web store are revenue and profits.
Thus, resource management schemes for e-commerce
servers should be geared towards optimizing business
metrics as opposed to conventional performance metrics.
This paper introduces a state transition graph called
Customer Behavior Model Graph (CBMG) to describe a
customer session. It then presents a family of
priority-based resource management policies for
e-commerce servers. Priorities change dynamically as a
function of the state a customer is in and as a
function of the amount of money the customer has
accumulated in his/her shopping cart. A detailed
simulation model was developed to assess the gain of
adaptive policies with respect to policies that are
oblivious to economic considerations. Simulation
results show that the adaptive priority scheme
suggested here can increase, during peak periods,
business-oriented metrics such as revenue/sec by as
much as 43\% over the non priority case.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Minshall:2000:APP,
author = "Greg Minshall and Yasushi Saito and Jeffrey C. Mogul
and Ben Verghese",
title = "Application performance pitfalls and {TCP}'s {Nagle}
algorithm",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "36--44",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346012",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance improvements to networked applications can
have unintended consequences. In a study of the
performance of the Network News Transport Protocol
(NNTP), the initial results suggested it would be
useful to disable TCP's Nagle algorithm for this
application. Doing so significantly improved latencies.
However, closer observation revealed that with the
Nagle algorithm disabled, the application was
transmitting an order of magnitude more packets. We
found that proper application buffer management
significantly improves performance, but that the Nagle
algorithm still slightly increases mean latency. We
suggest that modifying the Nagle algorithm would
eliminate this cost.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Roadknight:2000:FPC,
author = "Chris Roadknight and Ian Marshall and Debbie Vearer",
title = "File popularity characterisation",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "45--50",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346014",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A key determinant of the effectiveness of a web cache
is the locality of the files requested. In the past
this has been difficult to model, as locality appears
to be cache specific. We show that locality can be
characterised with a single parameter, which primarily
varies with the topological position of the cache, and
is largely independent of the culture of the cache
users. Accurate cache models can therefore be built
without any need to consider cultural effects that are
hard to predict.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file popularity; web caches",
}
@Article{Tomlinson:2000:HCI,
author = "Gary Tomlinson and Drew Major and Ron Lee",
title = "High-capacity {Internet} middleware: {Internet}
caching system architectural overview",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "51--56",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346017",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous studies measuring the performance of
general-purpose operating systems running large-scale
Internet server applications, such as proxy caches,
have identified design deficiencies that contribute to
lower than expected performance and scalability. This
paper introduces a high-capacity proxy cache service
built upon a specialized operating system designed to
efficiently support large-scale Internet middleware. It
suggests that specialized operating systems can better
meet the needs of these services than can their
general-purpose counterparts. It concludes with the
measured performance and scalability of this proxy
cache service.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vanderMei:2000:DSS,
author = "R. D. van der Mei and W. K. Ehrlich and P. K. Reeser
and J. P. Francisco",
title = "A decision support system for tuning {Web} servers in
distributed object oriented network architectures",
journal = j-SIGMETRICS,
volume = "27",
number = "4",
pages = "57--62",
month = mar,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/346000.346020",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:30:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web technologies are currently being employed to
provide end user interfaces in diverse computing
environments. The core element of these Web solutions
is a Web server that is based on the Hypertext Transfer
Protocol (HTTP) running over TCP/IP. Web servers are
required to respond to millions of transaction requests
per day at an `acceptable' Quality of Service (QoS)
level with respect to the end-to-end response time and
the server throughput. In many applications, the server
performs significant server-side processing in
distributed, object-oriented (OO) computing
environments. In these applications, a Web server
retrieves a file, parses the file for scripting
language content, interprets the scripting statements
and then executes embedded code, possibly requiring a
TCP connection to a remote application for data
transfer. In this paper, we present an end-to-end model
that addresses this new class of Web servers that
engage in OO computing. We have implemented the model
in a simulation tool. Performance predictions based on
the simulations are shown to match well with
performance observed in a test environment. Therefore,
the model forms an excellent basis for a Decision
Support System for system architects, allowing them to
predict the behavior of systems prior to their
creation, or the behavior of existing systems under new
load scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "architecture; computing; configuration tuning;
Decision Support System; distributed; HTTP; httpd;
object-oriented; performance; Web server; World Wide
Web",
}
@Article{Chu:2000:CES,
author = "Yang-hua Chu and Sanjay G. Rao and Hui Zhang",
title = "A case for end system multicast (keynote address)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "1--12",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339337",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The conventional wisdom has been that IP is the
natural protocol layer for implementing multicast
related functionality. However, ten years after its
initial proposal, IP Multicast is still plagued with
concerns pertaining to scalability, network management,
deployment and support for higher layer functionality
such as error, flow and congestion control. In this
paper, we explore an alternative architecture for small
and sparse groups, where end systems implement all
multicast related functionality including membership
management and packet replication. We call such a
scheme End System Multicast. This shifting of multicast
support from routers to end systems has the potential
to address most problems associated with IP Multicast.
However, the key concern is the performance penalty
associated with such a model. In particular, End System
Multicast introduces duplicate packets on physical
links and incurs larger end-to-end delay than IP
Multicast. In this paper, we study this question in the
context of the Narada protocol. In Narada, end systems
self-organize into an overlay structure using a fully
distributed protocol. In addition, Narada attempts to
optimize the efficiency of the overlay based on
end-to-end measurements. We present details of Narada
and evaluate it using both simulation and Internet
experiments. Preliminary results are encouraging. In
most simulations and Internet experiments, the delay
and bandwidth penalty are low. We believe the potential
benefits of repartitioning multicast functionality
between end systems and routers significantly outweigh
the performance penalty incurred.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Legout:2000:PFC,
author = "A. Legout and E. W. Biersack",
title = "{PLM}: fast convergence for cumulative layered
multicast transmission schemes",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "13--22",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339340",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major challenge in the Internet is to deliver live
audio/video content with a good quality and to transfer
files to large number of heterogeneous receivers.
Multicast and cumulative layered transmission are two
mechanisms of interest to accomplish this task
efficiently. However, protocols using these mechanisms
suffer from slow convergence time, lack of
inter-protocol fairness or TCP-fairness, and loss
induced by the join experiments. In this paper we define
and investigate the properties of a new multicast
congestion control protocol (called PLM) for
audio/video and file transfer applications based on a
cumulative layered multicast transmission. A
fundamental contribution of this paper is the
introduction and evaluation of a new and efficient
technique based on packet pair to infer which layers to
join. We evaluated PLM for a large variety of scenarios
and show that it converges fast to the optimal link
utilization, induces no loss to track the available
bandwidth, has inter-protocol fairness and
TCP-fairness, and scales with the number of receivers
and the number of sessions. Moreover, all these
properties hold in self similar and multifractal
environment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity inference; congestion control; cumulative
layers; FS-paradigm; multicast; packet pair",
}
@Article{Sahu:2000:ASD,
author = "Sambit Sahu and Philippe Nain and Christophe Diot and
Victor Firoiu and Don Towsley",
title = "On achievable service differentiation with token
bucket marking for {TCP}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "23--33",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339342",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Differentiated services (diffserv) architecture
has been proposed as a scalable solution for providing
service differentiation among flows without any
per-flow buffer management inside the core of the
network. It has been advocated that it is feasible to
provide service differentiation among a set of flows by
choosing an appropriate ``marking profile'' for each
flow. In this paper, we examine (i) whether it is
possible to provide service differentiation among a set
of TCP flows by choosing appropriate marking profiles
for each flow, (ii) under what circumstances, the
marking profiles are able to influence the service that
a TCP flow receives, and, (iii) how to choose a correct
profile to achieve a given service level. We derive a
simple, and yet accurate, analytical model for
determining the achieved rate of a TCP flow when
edge-routers use ``token bucket'' packet marking and
core-routers use active queue management for
preferential packet dropping. From our study, we
observe three important results: (i) the achieved rate
is not proportional to the assured rate, (ii) it is not
always possible to achieve the assured rate and, (iii)
there exist ranges of values of the achieved rate for
which token bucket parameters have no influence. We
find that it is not easy to regulate the service level
achieved by a TCP flow by solely setting the profile
parameters. In addition, we derive conditions that
determine when the bucket size influences the achieved
rate, and rates that can be achieved and those that
cannot. Our study provides insight for choosing
appropriate token bucket parameters for the achievable
rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bolosky:2000:FSD,
author = "William J. Bolosky and John R. Douceur and David Ely
and Marvin Theimer",
title = "Feasibility of a serverless distributed file system
deployed on an existing set of desktop {PCs}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "34--43",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339345",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an architecture for a serverless
distributed file system that does not assume mutual
trust among the client computers. The system provides
security, availability, and reliability by distributing
multiple encrypted replicas of each file among the
client machines. To assess the feasibility of deploying
this system on an existing desktop infrastructure, we
measure and analyze a large set of client machines in a
commercial environment. In particular, we measure and
report results on disk usage and content; file
activity; and machine uptimes, lifetimes, and loads. We
conclude that the measured desktop infrastructure would
passably support our proposed system, providing
availability on the order of one unfilled file request
per user per thousand days.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; availability; feasibility
analysis; personal computer usage data; reliability;
security; serverless distributed file system
architecture; trust; workload characterization",
}
@Article{Santos:2000:CRD,
author = "Jose Renato Santos and Richard R. Muntz and Berthier
Ribeiro-Neto",
title = "Comparing random data allocation and data striping in
multimedia servers",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "44--55",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We compare performance of a multimedia storage server
based on a random data allocation layout and block
replication with traditional data striping techniques.
Data striping techniques in multimedia servers are
often designed for restricted workloads, e.g.
sequential access patterns with CBR (constant bit rate)
requirements. On the other hand, a system based on
random data allocation can support virtually any type
of multimedia application, including VBR (variable bit
rate) video or audio, and interactive applications with
unpredictable access patterns, such as 3D interactive
virtual worlds, interactive scientific visualizations,
etc. Surprisingly, our results show that system
performance with random data allocation is competitive
and sometimes even outperforms traditional data
striping techniques, for the workloads for which data
striping is designed to work best; i.e. streams with
sequential access patterns and CBR requirements. Due to
its superiority in supporting general workloads and
competitive system performance, we believe that random
data allocation will be the scheme of choice for next
generation multimedia servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Griffin:2000:MPM,
author = "John Linwood Griffin and Steven W. Schlosser and
Gregory R. Ganger and David F. Nagle",
title = "Modeling and performance of {MEMS}-based storage
devices",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "56--65",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339354",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "MEMS-based storage devices are seen by many as
promising alternatives to disk drives. Fabricated using
conventional CMOS processes, MEMS-based storage
consists of thousands of small, mechanical probe tips
that access gigabytes of high-density, nonvolatile
magnetic storage. This paper takes a first step towards
understanding the performance characteristics of these
devices by mapping them onto a disk-like metaphor.
Using simulation models based on the mechanics
equations governing the devices' operation, this work
explores how different physical characteristics (e.g.,
actuator forces and per-tip data rates) impact the
design trade-offs and performance of MEMS-based
storage. Overall results indicate that average access
times for MEMS-based storage are 6.5 times faster than
for a modern disk (1.5 ms vs. 9.7 ms). Results from
filesystem and database benchmarks show that this
improvement reduces application I/O stall times up to
70\%, resulting in overall performance improvements of
3X.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raunak:2000:IPC,
author = "Mohammad S. Raunak and Prashant Shenoy and Pawan Goyal
and Krithi Ramamritham",
title = "Implications of proxy caching for provisioning
networks and servers",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "66--77",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339357",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine the potential benefits of
web proxy caches in improving the effective capacity of
servers and networks. Since networks and servers are
typically provisioned based on a high percentile of the
load, we focus on the effects of proxy caching on the
tail of the load distribution. We find that, unlike
their substantial impact on the average load, proxies
have a diminished impact on the tail of the load
distribution. The exact reduction in the tail and the
corresponding capacity savings depend on the percentile
of the load distribution chosen for provisioning
networks and servers --- the higher the percentile, the
smaller the savings. In particular, compared to over a
50\% reduction in the average load, the savings in
network and server capacity is only 20--35\% for the
99th percentile of the load distribution. We also find
that while proxies can be somewhat useful in smoothing
out some of the burstiness in web workloads; the
resulting workload continues, however, to exhibit
substantial burstiness and a heavy-tailed nature. We
identify large objects with poor locality to be the
limiting factor that diminishes the impact of proxies
on the tail of load distribution. We conclude that,
while proxies are immensely useful to users due to the
reduction in the average response time, they are less
effective in improving the capacities of networks and
servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2000:CWC,
author = "Jiong Yang and Wei Wang and Richard Muntz",
title = "Collaborative {Web} caching based on proxy
affinities",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "78--89",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339360",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the exponential growth of hosts and traffic
workloads on the Internet, collaborative web caching
has been recognized as an efficient solution to
alleviate web page server bottlenecks and reduce
traffic. However, cache discovery, i.e., locating where
a page is cached, is a challenging problem, especially
in the fast growing World Wide Web environment, where
the number of participating proxies can be very large.
In this paper, we propose a new scheme which employs
proxy affinities to maintain a dynamic distributed
collaborative caching infrastructure. Web pages are
partitioned into clusters according to proxy reference
patterns. All proxies which frequently access some
page(s) in the same web page cluster form an
``information group''. When web pages belonging to a
web page cluster are deleted from or added into a
proxy's cache, only proxies in the associated
information group are notified. This scheme can be
shown to greatly reduce the number of messages and
other overhead on individual proxies while maintaining
a high cache hit rate. Finally, we employ trace driven
simulation to evaluate our web caching scheme using
three web access trace logs to verify that our caching
structure can provide significant benefits on real
workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aron:2000:CRM,
author = "Mohit Aron and Peter Druschel and Willy Zwaenepoel",
title = "Cluster reserves: a mechanism for resource management
in cluster-based network servers",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "90--101",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In network (e.g., Web) servers, it is often desirable
to isolate the performance of different classes of
requests from each other. That is, one seeks to achieve
that a certain minimal proportion of server resources
are available for a class of requests, independent of
the load imposed by other requests. Recent work
demonstrates how to achieve this performance isolation
in servers consisting of a single, centralized node;
however, achieving performance isolation in a
distributed, cluster based server remains a
                 problem. This paper introduces a new abstraction, the
cluster reserve, which represents a resource principal
in a cluster based network server. We present a design
and evaluate a prototype implementation that extends
existing techniques for performance isolation on a
                 single node server to cluster based servers. In our
design, the dynamic cluster-wide resource management
problem is formulated as a constrained optimization
problem, with the resource allocations on individual
machines as independent variables, and the desired
cluster-wide resource allocations as constraints.
Periodically collected resource usages serve as further
                 inputs to the problem. Experimental results show that
cluster reserves are effective in providing performance
isolation in cluster based servers. We demonstrate
that, in a number of different scenarios, cluster
reserves are effective in ensuring performance
isolation while enabling high utilization of the server
resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barakat:2000:APS,
author = "Chadi Barakat and Eitan Altman",
title = "Analysis of the phenomenon of several slow start
phases in {TCP} (poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "102--103",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339388",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wong:2000:PGQ,
author = "Wai-Man R. Wong and Richard R. Muntz",
title = "Providing guaranteed quality of service for
interactive visualization applications (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "104--105",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339389",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2000:IMF,
author = "Xin Wang and C. Yu and Henning Schulzrinne and Paul
Stirpe and Wei Wu",
title = "{IP} multicast fault recovery in {PIM} over {OSPF}
(poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "106--107",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339390",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lety:2000:CBM,
author = "Emmanuel L{\'e}ty and Thierry Turletti and
Fran{\c{c}}ois Baccelli",
title = "{Cell}-based multicast grouping in large-scale virtual
environments (poster session) (extended abstract)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "108--109",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339392",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2000:TLW,
author = "Shudong Jin and Azer Bestavros",
title = "Temporal locality in {Web} request streams (poster
session) (extended abstract): sources, characteristics,
and caching implications",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "110--111",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339393",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schindler:2000:ADD,
author = "Jiri Schindler and Gregory R. Ganger",
title = "Automated disk drive characterization (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "112--113",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "DIXtrac is a program that automatically characterizes
the performance of modern disk drives. This extended
abstract overviews the contents of [3], which describes
and validates DIXtrac's algorithms for extracting
accurate values for over 100 performance-critical
parameters in 2-6 minutes without human intervention or
special hardware support. The extracted data includes
detailed layout and geometry information, mechanical
timings, cache management policies, and command
processing overheads. DIXtrac is validated by
configuring a detailed disk simulator with its
extracted parameters; in most cases, the resulting
accuracies match those of the most accurate disk
simulators reported in the literature. To date, DIXtrac
has been successfully used on ten different models from
four different manufacturers. A growing database of
validated disk characteristics is available in DiskSim
[1] format at
                 \path|http://www.ece.cmu.edu/~ganger/disksim/diskspecs.html|.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fang:2000:OSP,
author = "Zhen Fang and Lixin Zhang and John Carter and Sally
McKee and Wilson Hsieh",
title = "Online superpage promotion revisited (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "114--115",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nikolaidis:2000:ILL,
author = "Ioanis Nikolaidis and Fulu Li and Ailan Hu",
title = "An inherently loss-less and bandwidth-efficient
periodic broadcast scheme for {VBR} video (poster
session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "116--117",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Koksal:2000:AST,
author = "Can Emre Koksal and Hisham Kassab and Hari
Balakrishnan",
title = "An analysis of short-term fairness in wireless media
access protocols (poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "118--119",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joshi:2000:RDH,
author = "Srinath R. Joshi and Injong Rhee",
title = "{RESCU}: dynamic hybrid packet-loss recovery for video
transmission over the {Internet} (poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "120--121",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339403",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The current Internet is not reliable; packet loss
rates are frequently high, and varying over time.
Transmitting high-quality interactive video over the
Internet is challenging because the quality of
compressed video is very susceptible to packet losses.
Loss of packets belonging to a video frame manifests
itself not only in the reduced quality of that frame
but also in the propagation of that distortion to
successive frames. This error propagation problem is
inherent in many motion-based video codecs due to the
interdependence of encoded video frames. This paper
presents a dynamic loss recovery scheme, called RESCU,
to address the error propagation problem. In this new
scheme, picture coding patterns are dynamically adapted
to current network conditions in order to maximize the
effectiveness of hybrid transport level recovery
(employing both forward error correction and
retransmission) in reducing error propagation. Since
RESCU does not introduce any playout delay at the
receiver, it is suitable for interactive video
communication. An experimental study based on actual
Internet transmission traces representing various
network conditions shows that dynamic hybrid RESCU
exhibits better error resilience and incurs much less
bit overhead than existing error recovery techniques
such as NEWPRED and Intra-H.261.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Padmanabhan:2000:CAD,
author = "Venkata N. Padmanabhan and Lili Qiu",
title = "The content and access dynamics of a busy {Web} server
(poster session)",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "122--123",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the MSNBC Web site, one of the busiest in the
Internet today. We analyze the dynamics of content
creation and modification as well as client accesses.
Our key findings are (a) files tend to change little
upon modification, (b) a small set of files get
modified repeatedly, (c) file popularity follows a
Zipf-like distribution with an $ \alpha $ much larger
than reported in previous, proxy-based studies, and (d)
there is significant temporal stability in file
popularity but not much stability in the domains from
which popular content is accessed. We discuss
implications of these findings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Altman:2000:TPB,
author = "Eitan Altman and Konstantin Avrachenkov and Chadi
Barakat",
title = "{TCP} in presence of bursty losses",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "124--133",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.350541",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Martin:2000:IDR,
author = "Jim Martin and Arne Nilsson and Injong Rhee",
title = "The incremental deployability of {RTT}-based
congestion avoidance for high speed {TCP Internet}
connections",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "134--144",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our research focuses on end-to-end congestion
avoidance algorithms that use round trip time (RTT)
fluctuations as an indicator of the level of network
congestion. The algorithms are referred to as
delay-based congestion avoidance or DCA. Due to the
economics associated with deploying change within an
existing network, we are interested in an incrementally
deployable enhancement to the TCP/Reno protocol. For
instance, TCP/Vegas, a DCA algorithm, has been proposed
as an incremental enhancement. Requiring relatively
minor modifications to a TCP sender, TCP/Vegas has been
shown to increase end-to-end TCP throughput primarily
by avoiding packet loss. We study DCA in today's best
effort Internet where IP switches are subject to
thousands of TCP flows resulting in congestion with
time scales that span orders of magnitude. Our results
suggest that RTT-based congestion avoidance may not be
reliably incrementally deployed in this environment.
Through extensive measurement and simulation, we find
that when TCP/DCA (i.e., a TCP/Reno sender that is
extended with DCA) is deployed over a high speed
Internet path, the flow generally experiences degraded
throughput compared to an unmodified TCP/Reno flow. We
show (1) that the congestion information contained in
RTT samples is not sufficient to predict packet loss
reliably and (2) that the congestion avoidance in
response to delay increase has minimal impact on the
congestion level over the path when the total DCA
traffic at the bottleneck consumes less than 10\% of
the bottleneck bandwidth.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion avoidance; RTT measurement; TCP",
}
@Article{Rubenstein:2000:DSC,
author = "Dan Rubenstein and Jim Kurose and Don Towsley",
title = "Detecting shared congestion of flows via end-to-end
measurement",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "145--155",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current Internet congestion control protocols operate
independently on a per-flow basis. Recent work has
demonstrated that cooperative congestion control
strategies between flows can improve performance for a
variety of applications, ranging from aggregated TCP
transmissions to multiple-sender multicast
applications. However, in order for this cooperation to
be effective, one must first identify the flows that
are congested at the same set of resources. In this
paper, we present techniques based on loss or delay
observations at end-hosts to infer whether or not two
flows experiencing congestion are congested at the same
network resources. We validate these techniques via
queueing analysis, simulation, and experimentation
within the Internet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2000:MAL,
author = "Xin Wang and Henning Schulzrinne and Dilip Kandlur and
Dinesh Verma",
title = "Measurement and analysis of {LDAP} performance",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "156--165",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339412",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cleveland:2000:IPG,
author = "William S. Cleveland and Dong Lin and Don X. Sun",
title = "{IP} packet generation: statistical models for {TCP}
start times based on connection-rate superposition",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "166--177",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339413",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TCP start times for HTTP are nonstationary. The
nonstationarity occurs because the start times on a
link, a point process, are a superposition of source
traffic point processes, and the statistics of
superposition changes as the number of superposed
processes changes. The start time rate is a measure of
the number of traffic sources. The univariate
distribution of the inter-arrival times is
approximately Weibull, and as the rate increases, the
Weibull shape parameter goes to 1, an exponential
distribution. The autocorrelation of the log
inter-arrival times is described by a simple,
two-parameter process: white noise plus a long-range
persistent time series. As the rate increases, the
variance of the persistent series tends to zero, so the
log times tend to white noise. A parsimonious
statistical model for log inter-arrivals accounts for
the autocorrelation, the Weibull distribution, and the
nonstationarity in the two with the rate. The model,
whose purpose is to provide stochastic input to a
network simulator, has the desirable property that the
superposition point process is generated as a single
stream. The parameters of the model are functions of
the rate, so to generate start times, only the rate is
specified. As the rate increases, the model tends to a
Poisson process. These results arise from theoretical
and empirical study based on the concept of
connection-rate superposition. The theory is the
mathematics of superposed point processes, and the
empiricism is an analysis of 23 million TCP connections
organized into 10704 blocks of approximately 15 minutes
each.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hegde:2000:ISH,
author = "Nidhi Hegde and Khosrow Sohraby",
title = "On the impact of soft hand-off in cellular systems",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "178--187",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339414",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
 abstract =     "We present a model for soft hand-off in wireless
cellular networks. In such networks, due to overlapping
cells, hand-offs are not instantaneous and multiple
channels may be occupied by a single mobile for a
                 non-zero freeze time period. We provide a mathematical
model of wireless cellular networks with soft
hand-offs. We examine different performance measures
and show that freeze time may have a major impact on
the system performance if the mobility rate is not
negligible. Both exact and approximate formulations are
given. Different fixed-point approximation methods are
used to reduce the complexity of the exact solution.
Various performance measures such as new and hand-off
blocking and probability of a call dropout are
carefully examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shakkottai:2000:DAP,
author = "Sanjay Shakkottai and R. Srikant",
title = "Delay asymptotics for a priority queueing system",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "188--195",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339415",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study discrete-time priority
queueing systems fed by a large number of arrival
streams. We first provide bounds on the actual delay
asymptote in terms of the virtual delay asymptote.
Then, under suitable assumptions on the arrival process
to the queue, we show that these asymptotes are the
same. We then consider a priority queueing system with
two queues. Using the earlier result, we derive an
upper bound on the tail probability of the delay. Under
certain assumptions on the rate function of the arrival
process, we show that the upper bound is tight. We then
consider a system with Markovian arrivals and
numerically evaluate the delay tail probability and
validate these results with simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2000:FAI,
author = "Leana Golubchik and John C. S. Lui",
title = "A fast and accurate iterative solution of a
multi-class threshold-based queueing system with
hysteresis",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "196--206",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339416",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our main goal in this work is to develop an efficient
method for solving such models and computing the
corresponding performance measures of interest, which
can subsequently be used in evaluating designs of
threshold-based systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Miner:2000:UES,
author = "Andrew S. Miner and Gianfranco Ciardo and Susanna
Donatelli",
title = "Using the exact state space of a {Markov} model to
compute approximate stationary measures",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "207--216",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339417",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a new approximation algorithm based on an
exact representation of the state space $S$, using
decision diagrams, and of the transition rate matrix
$R$, using Kronecker algebra, for a Markov model with
$K$ submodels. Our algorithm builds and solves $K$
Markov chains, each corresponding to a different
aggregation of the exact process, guided by the
structure of the decision diagram, and iterates on
their solution until their entries are stable. We prove
that exact results are obtained if the overall model
has a product-form solution. Advantages of our method
include good accuracy, low memory requirements, fast
execution times, and a high degree of automation, since
the only additional information required to apply it is
a partition of the model into the $K$ submodels. As far
as we know, this is the first time an approximation
algorithm has been proposed where knowledge of the
exact state space is explicitly used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eager:2000:ATH,
author = "Derek L. Eager and Daniel J. Sorin and Mary K.
Vernon",
title = "{AMVA} techniques for high service time variability",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "217--228",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339418",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by experience gained during the validation
of a recent Approximate Mean Value Analysis (AMVA)
model of modern shared memory architectures, this paper
re-examines the ``standard'' AMVA approximation for
non-exponential FCFS queues. We find that this
approximation is often inaccurate for FCFS queues with
high service time variability. For such queues, we
propose and evaluate: (1) AMVA estimates of the mean
residual service time at an arrival instant that are
much more accurate than the standard AMVA estimate, (2)
a new AMVA technique that provides a much more accurate
estimate of mean center residence time than the
standard AMVA estimate, and (3) a new AMVA technique
for computing the mean residence time at a
``downstream'' queue which has a more bursty arrival
process than is assumed in the standard AMVA equations.
Together, these new techniques increase the range of
applications to which AMVA may be fruitfully applied,
so that for example, the memory system architecture of
shared memory systems with complex modern processors
can be analyzed with these computationally efficient
methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ofelt:2000:EPP,
author = "David Ofelt and John L. Hennessy",
title = "Efficient performance prediction for modern
microprocessors",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "229--239",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339419",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Generating an accurate estimate of the performance of
a program on a given system is important to a large
number of people. Computer architects, compiler
writers, and developers all need insight into a
machine's performance. There are a number of
performance estimation techniques in use, from
profile-based approaches to full machine simulation.
This paper discusses a profile-based performance
estimation technique that uses a lightweight
instrumentation phase that runs in order number of
dynamic instructions, followed by an analysis phase
that runs in roughly order number of static
instructions. This technique accurately predicts the
performance of the core pipeline of a detailed
out-of-order issue processor model while scheduling far
fewer instructions than does full simulation. The
difference between the predicted execution time and the
time obtained from full simulation is only a few
percent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Endo:2000:IIP,
author = "Yasuhiro Endo and Margo Seltzer",
title = "Improving interactive performance using {TIPME}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "240--251",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339420",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "On the vast majority of today's computers, the
dominant form of computation is GUI-based user
interaction. In such an environment, the user's
perception is the final arbiter of performance.
Human-factors research shows that a user's perception
of performance is affected by unexpectedly long delays.
However, most performance-tuning techniques currently
rely on throughput-sensitive benchmarks. While these
techniques improve the average performance of the
system, they do little to detect or eliminate
response-time variabilities --- in particular,
                 unexpectedly long delays. We introduce a measurement
infrastructure that allows us to improve user-perceived
performance by helping us to identify and eliminate the
causes of the unexpected long response times that users
find unacceptable. We describe TIPME (The Interactive
Performance Monitoring Environment), a collection of
measurement tools that allowed us to quickly and easily
diagnose interactive performance ``bugs'' in a mature
operating system. We present two case studies that
demonstrate the effectiveness of our measurement
infrastructure. Each of the performance problems we
identify drastically affects variability in response
time in a mature system, demonstrating that current
tuning techniques do not address this class of
performance problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "interactive performance; monitoring",
}
@Article{Farkas:2000:QEC,
author = "Keith I. Farkas and Jason Flinn and Godmar Back and
Dirk Grunwald and Jennifer M. Anderson",
title = "Quantifying the energy consumption of a pocket
computer and a {Java Virtual Machine}",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "252--263",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339421",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine the energy consumption of a
state-of-the-art pocket computer. Using a data
acquisition system, we measure the energy consumption
of the Itsy Pocket Computer, developed by Compaq
Computer Corporation's Palo Alto Research Labs. We
begin by showing that the energy usage characteristics
of the Itsy differ markedly from that of a notebook
computer. Then, since we expect that flexible software
environments will become increasingly prevalent on
pocket computers, we consider applications running in a
Java environment. In particular, we explain some of the
Java design tradeoffs applicable to pocket computers,
and quantify their energy costs. For the design options
we considered and the three workloads we studied, we
find a maximum change in energy use of 25\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2000:MSB,
author = "Jin-Soo Kim and Yarsun Hsu",
title = "Memory system behavior of {Java} programs: methodology
and analysis",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "264--274",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339422",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the memory system behavior of Java
programs by analyzing memory reference traces of
several SPECjvm98 applications running with a
Just-In-Time (JIT) compiler. Trace information is
collected by an exception-based tracing tool called
JTRACE, without any instrumentation to the Java
programs or the JIT compiler. First, we find that the
overall cache miss ratio is increased due to garbage
collection, which suffers from higher cache misses
compared to the application. We also note that going
beyond 2-way cache associativity improves the cache
miss ratio marginally. Second, we observe that Java
programs generate a substantial amount of short-lived
objects. However, the size of frequently-referenced
long-lived objects is more important to the cache
performance, because it tends to determine the
application's working set size. Finally, we note that
the default heap configuration which starts from a
small initial heap size is very inefficient since it
invokes a garbage collector frequently. Although the
direct costs of garbage collection decrease as we
increase the available heap size, there exists an
optimal heap size which minimizes the total execution
time due to the interaction with the virtual memory
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Karlsson:2000:AMW,
author = "Magnus Karlsson and Per Stenstr{\"o}m",
title = "An analytical model of the working-set sizes in
decision-support systems",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "275--285",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339423",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an analytical model to study how
working sets scale with database size and other
applications parameters in decision-support systems
(DSS). The model uses application parameters, that are
measured on down-scaled database executions, to predict
cache miss ratios for executions of large databases. By
applying the model to two database engines and typical
DSS queries we find that, even for large databases, the
most performance-critical working set is small and is
caused by the instructions and private data that are
required to access a single tuple. Consequently, its
size is not affected by the database size.
Surprisingly, database data may also exhibit temporal
locality but the size of its working set critically
depends on the structure of the query, the method of
scanning, and the size and the content of the
database.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2000:TAF,
author = "Jongmoo Choi and Sam H. Noh and Sang Lyul Min and
Yookun Cho",
title = "Towards application\slash file-level characterization
of block references: a case for fine-grained buffer
management",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "286--295",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339424",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two contributions are made in this paper. First, we
show that system level characterization of file block
references is inadequate for maximizing buffer cache
performance. We show that a finer-grained
characterization approach is needed. Though application
level characterization methods have been proposed, this
is the first attempt, to the best of our knowledge, to
consider file level characterizations. We propose an
Application/File-level Characterization (AFC) scheme
where we detect on-line the reference characteristics
at the application level and then at the file level, if
necessary. The results of this characterization are
used to employ appropriate replacement policies in the
buffer cache to maximize performance. The second
contribution is in proposing an efficient and fair
buffer allocation scheme. Application or file level
resource management is infeasible unless there exists
an allocation scheme that is efficient and fair. We
propose the $ \Delta $ HIT allocation scheme that takes
away a block from the application/file where the
removal results in the smallest reduction in the number
of expected buffer cache hits. Both the AFC and $
\Delta $ HIT schemes are on-line schemes that detect
and allocate as applications execute. Experiments using
trace-driven simulations show that substantial
performance improvements can be made. For single
application executions the hit ratio increased an
average of 13 percentage points compared to the LRU
policy, with a maximum increase of 59 percentage
points, while for multiple application executions, the
increase is an average of 12 percentage points, with a
maximum of 32 percentage points for the workloads
considered.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kodialam:2000:OMR,
author = "Murali S. Kodialam and T. V. Lakshman and Sudipta
Sengupta",
title = "Online multicast routing with bandwidth guarantees: a
new approach using multicast network flow",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "296--306",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339425",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a new algorithm for on-line
routing of bandwidth-guaranteed multicasts where
routing requests arrive one-by-one without there being
any a priori knowledge of future requests. A multicast
routing request consists of a source $s$, a set of
receivers $R$, and a bandwidth requirement $b$. This
multicast routing problem arises in many contexts. Two
applications of interest are routing of
point-to-multipoint label-switched paths in
Multi-Protocol Label Switched (MPLS) networks, and the
provision of bandwidth guaranteed Virtual Private
Network (VPN) services under the ``hose'' service model
[17]. Offline multicast routing algorithms cannot be
used since they require a priori knowledge of all
multicast requests that are to be routed. Instead,
on-line algorithms that handle requests arriving
one-by-one and that satisfy as many potential future
demands as possible are needed. The newly developed
algorithm is an on-line algorithm and is based on the
idea that a newly routed multicast must follow a route
that does not ``interfere too much'' with network paths
that may be critical to satisfy future demands. We
develop a multicast tree selection heuristic that is
based on the idea of deferred loading of certain
``critical'' links. These critical links are identified
by the algorithm as links that, if heavily loaded,
would make it impossible to satisfy future demands
between certain ingress-egress pairs. The presented
algorithm uses link-state information and some
auxiliary capacity information for multicast tree
selection and is amenable to distributed
implementation. Unlike previous algorithms, the
proposed algorithm exploits any available knowledge of
the network ingress-egress points of potential future
demands even though the demands themselves are unknown
and performs very well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multicast routing; network flow; Steiner tree; traffic
engineering",
}
@Article{Gao:2000:SIR,
author = "Lixin Gao and Jennifer Rexford",
title = "Stable {Internet} routing without global
coordination",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "307--317",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/345063.339426",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Border Gateway Protocol (BGP) allows an autonomous
system (AS) to apply diverse local policies for
selecting routes and propagating reachability
information to other domains. However, BGP permits ASes
to have conflicting policies that can lead to routing
instability. This paper proposes a set of guidelines
for an AS to follow in setting its routing policies,
without requiring coordination with other ASes. Our
approach exploits the Internet's hierarchical structure
and the commercial relationships between ASes to impose
a partial order on the set of routes to each
destination. The guidelines conform to conventional
traffic-engineering practices of ISPs, and provide each
AS with significant flexibility in selecting its local
policies. Furthermore, the guidelines ensure route
convergence even under changes in the topology and
routing policies. Drawing on a formal model of BGP, we
prove that following our proposed policy guidelines
guarantees route convergence. We also describe how our
methodology can be applied to new types of
relationships between ASes, how to verify the
hierarchical AS relationships, and how to realize our
policy guidelines. Our approach has significant
practical value since it preserves the ability of each
AS to apply complex local policies without divulging
its BGP configurations to others.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korkmaz:2000:EAF,
author = "Turgay Korkmaz and Marwan Krunz and Spyros Tragoudas",
title = "An efficient algorithm for finding a path subject to
two additive constraints",
journal = j-SIGMETRICS,
volume = "28",
number = "1",
pages = "318--327",
month = jun,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/339331.339427",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:31:11 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the key issues in providing end-to-end
quality-of-service guarantees in packet networks is how
to determine a feasible route that satisfies a set of
constraints while simultaneously maintaining high
utilization of network resources. In general, finding a
path subject to multiple additive constraints (e.g.,
delay, delay-jitter) is an NP-complete problem that
cannot be exactly solved in polynomial time.
Accordingly, heuristics and approximation algorithms
are often used to address this problem. Previously
proposed algorithms suffer from either excessive
computational cost or low performance. In this paper,
we provide an efficient approximation algorithm for
finding a path subject to two additive constraints. The
worst-case computational complexity of this algorithm
is within a logarithmic number of calls to Dijkstra's
shortest path algorithm. Its average complexity is much
lower than that, as demonstrated by simulation results.
The performance of the proposed algorithm is justified
via theoretical performance bounds. To achieve further
performance improvement, several extensions to the
basic algorithm are also provided at low extra
computational cost. Extensive simulations are used to
demonstrate the high performance of the proposed
algorithm and to contrast it with other path selection
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multiple constrained path selection; QoS routing;
scalable routing",
}
@Article{Kant:2000:WPA,
author = "Krishna Kant",
title = "{Workshop on Performance and Architecture of Web
Servers (PAWS-2000)}",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "3--4",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.581257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:2000:SIS,
author = "Krishna Kant and Prasant Mohapatra",
title = "Scalable {Internet} servers: issues and challenges",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "5--8",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brandman:2000:CFW,
author = "Onn Brandman and Junghoo Cho and Hector Garcia-Molina
and Narayanan Shivakumar",
title = "Crawler-friendly {Web} servers",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "9--14",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study how to make web servers (e.g.,
Apache) more crawler friendly. Current web servers
offer the same interface to crawlers and regular web
surfers, even though crawlers and surfers have very
different performance requirements. We evaluate simple
and easy-to-incorporate modifications to web servers so
that there are significant bandwidth savings.
Specifically, we propose that web servers export
meta-data archives describing their content.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burns:2000:CLD,
author = "Randal C. Burns and Darrell D. E. Long and Robert M.
Rees",
title = "Consistency and locking for distributing updates to
{Web} servers using a file system",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "15--21",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed file systems are often used to replicate a
Web site's content among its many servers. However, for
content that needs to be dynamically updated and
distributed to many servers, file system locking
protocols exhibit high latency and heavy network usage.
Poor performance arises because the Web-serving
workload differs from the assumed workload. To address
the shortcomings of file systems, we introduce the {\em
publish consistency\/} model well suited to the
Web-serving workload and implement it in the {\em
producer-consumer\/} locking protocol. A comparison of
this protocol against other file system protocols by
simulation shows that producer-consumer locking removes
almost all latency due to protocol overhead and
significantly reduces network load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vasiliou:2000:PDQ,
author = "Nikolaos Vasiliou and Hanan Lutfiyya",
title = "Providing a differentiated quality of service in a
{World Wide Web} server",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "22--28",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a strategy of extending a Web
server to be able to differentiate between requests in
different classes. This is required because most Web
servers are unable to do this by themselves. We present
our strategy and its design along with some initial
performance results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhattacharjee:2000:BFB,
author = "Samrat Bhattacharjee and William C. Cheng and Cheng-Fu
Chou and Leana Golubchik and Samir Khuller",
title = "{Bistro}: a framework for building scalable wide-area
{\em {Upload\/}} applications",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "29--35",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hot spots are a major obstacle to achieving
scalability in the Internet. At the application layer,
hot spots are usually caused by either (a) high demand
for some data or (b) high demand for a certain service.
This high demand for data or services is typically the
result of a {\em real-life event\/} involving
availability of new data or approaching deadlines;
therefore, relief of these hot spots may improve
quality of life. At the application layer, hot spot
problems have traditionally been dealt with using some
combination of (1) increasing capacity; (2) spreading
the load over time, space, or both; and (3) changing
the workload. We note that the classes of solutions
stated above have been studied mostly in the context of
applications using the following types of communication
(a) one-to-many, (b) many-to-many, and (c) one-to-one.
However, to the best of our knowledge there is no
existing work on making applications using {\em
many-to-one\/} communication scalable and efficient
(existing solutions, such as web based submissions,
simply use many independent one-to-one transfers). This
corresponds to an important class of applications,
whose examples include the various {\em upload\/}
applications such as submission of income tax forms,
conference paper submission, proposal submission
through the NSF FastLane system, homework and project
submissions in distance education, voting in digital
democracy applications, voting in interactive
television, and many more. Consequently, the main focus
of this paper is {\em scalable infrastructure design
for relief of hot spots in wide-area upload
applications}. The main contributions of this paper are
as follows. We state (a) a new problem, specifically,
the many-to-one communication, or {\em upload}, problem
as well as (b) the (currently) fundamental obstacles to
building scalable wide-area upload applications. We
also propose a general framework, which we term the
{\em Bistro\/} system, for a class of solutions to the
upload problem. In addition, we suggest a number of
open research problems, within this framework,
throughout the paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kraemer:2000:MIO,
author = "E. Kraemer and G. Paix{\~a}o and D. Guedes and W.
{Meira, Jr.} and V. Almeida",
title = "Minimizing the impact of orphan requests in e-commerce
services",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "36--42",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The most common problem of an overloaded
electronic-commerce server is an increase in the
response time perceived by customers, who may restart
their requests hoping to get a faster response, or
simply abort them, giving up on the store. Both
behaviors generate `orphan' requests: although they
were received by the server, they should not be
answered because their requestors have already
abandoned them. Orphan requests waste system resources,
since the server becomes aware of their cancellation
only when it tries to send a response and finds out
that the connection was closed. In this paper we
propose a new kernel service, the Connection Sentry,
which keeps track of requests being performed and
notifies processes about an eventual cancellation. Once
notified, a process can interrupt the execution of the
request, saving system resources and bandwidth. We
evaluated the gains by using our proposal in a virtual
bookstore, where we observed that the Connection Sentry
reduced service latency by up to 31\% and increased the
throughput by 27\% in overloaded servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Davison:2000:PPI,
author = "Brian D. Davison and Vincenzo Liberatore",
title = "Pushing politely: improving {Web} responsiveness one
packet at a time",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "43--43",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The rapid growth of traffic on the World-Wide Web
results in heavier loads on networks and servers and in
increased latency experienced while retrieving web
documents. This paper presents a framework that
exploits idle periods to satisfy future HTTP requests
speculatively and opportunistically. Our proposal
differs from previous schemes in that speculative
dissemination always gives precedence to on-demand
traffic, uses ranged requests for improved performance,
and can be implemented over a connectionless transport.
The protocol uses bounded and little server state even
as the workload was increased and it is resistant to
erroneous estimates of available bandwidth. Substantial
latency improvements are reported over pure on-demand
strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2000:CWU,
author = "Martin Arlitt",
title = "Characterizing {Web} user sessions",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "50--63",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.362920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a detailed characterization of
user sessions to the 1998 World Cup Web site. This
study analyzes data that was collected from the World
Cup site over a three month period. During this time
the site received 1.35 billion requests from 2.8
million distinct clients. This study focuses on
numerous user session characteristics, including
distributions for the number of requests per session,
number of pages requested per session, session length
and inter-session times. This paper concludes with a
discussion of how these characteristics can be utilized
in improving Web server performance in terms of the
end-user experience.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hadharan:2000:EEP,
author = "R. Hadharan and W. K. Ehrlich and D. Cura and P. K.
Reeser",
title = "End to End Performance Modeling of {Web} Server
Architectures",
journal = j-SIGMETRICS,
volume = "28",
number = "2",
pages = "57--63",
month = sep,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/362883.581258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:31 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web server performance in a distributed
Object-Oriented (OO) environment is a complex interplay
between a variety of factors (e.g., hardware platform,
threading model, object scope model, server operating
system, network bandwidth, disk file size, caching). In
this paper, we present a model-based approach to Web
Server performance evaluation in terms of an end-to-end
queueing model implemented in a simulation tool. We
have applied this model to Active Server Page (ASP) and
Common Object Model (COM) technology in Microsoft's
Internet Information Server and to the Java Server Page
(JSP) and JavaBean technology in both IIS and Netscape
Enterprise Server (NES). Our results indicate that for
the ASP Script Engine, performance predictions from the
simulation model matched the performance observed in a
test environment. However, for the JSP Script Engine,
the model predicted higher throughput than laboratory
test results at high load. This result suggests that
Web Server performance can be severely limited by a
software bottleneck that causes requests to be
serialized. This may cause a request to wait for some
resource (i.e., a lock) as opposed to consuming CPU or
memory. Implications of these results for Web Server
performance in general are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2000:AAS,
author = "Weiping Zhu",
title = "An approximate analysis of the shortest queue policy
on soft real-time scheduling",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "3--10",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377618",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The {\em join the shortest queue\/} (JSQ) policy is
studied in the context of soft real-time scheduling. An
approximate analysis of the JSQ is developed and
presented in this paper. The result obtained from the
approximate analysis is compared against the simulation
one, that shows the approximate analysis is highly
accurate. Thus, the approximate analysis can be applied
to the development of soft real-time systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2000:SIP,
author = "Bo Li and Kazem Sohraby",
title = "Special Issue on Performance Issues in Mobile
Computing",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "11--11",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.581259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2000:PWC,
author = "Ming Feng Chang and Yi-Bing Lin",
title = "Performance of a weakly consistent wireless {Web}
access mechanism",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "12--20",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377619",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In wireless web information access, long response times may
be experienced. To reduce the response times of
wireless data access in a mobile network, caches are
utilized in the wireless handheld devices or wireless
proxy server. This paper proposes a wireless web data
access algorithm for WAP (wireless application
protocol) caching proxy to speed up data access. Our
algorithm utilizes the access frequency to tune the
data expiration time. The performance of the algorithm
is investigated and is compared with existing TTL-based
algorithms. Our study indicates that good performance
is expected for the new algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Toh:2000:EAH,
author = "C.-K. Toh and Richard Chen and Minar Delwar and Donald
Allen",
title = "Experimenting with an {Ad Hoc} wireless network on
campus: insights and experiences",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "21--29",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377622",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ad hoc wireless networks are new communication
networks that can be dynamically formed and deformed
on-the-fly, anytime and anywhere. User data is routed
with the help of an ad hoc mobile routing protocol.
Before the deployment of ad hoc mobile services, the
communication performance of such networks has to be
evaluated to demonstrate the practicality limits based
on today's hardware and innovative communication
software. This paper describes the realization of an ad
hoc wireless testbed and the various experimental field
trials performed on campus. In particular, throughput,
end-to-end delay, route discovery time, and the impact
of varying source packet size and beaconing intervals
are examined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lang:2000:PED,
author = "Tanja Lang and Daniel Floreani",
title = "Performance evaluation of different {TCP} error
detection and congestion control strategies over a
wireless link",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "30--38",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377623",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an evaluation of the two
major parts of TCP that impact its performance in
wireless environments, namely error detection and
congestion control. We have re-implemented the most
commonly used TCP error detection and congestion
control strategies using a modular design technique.
Using this implementation we have evaluated the
performance in terms of throughput and underlying
network usage of different combinations of these
strategies over a lossy link with high propagation
delay. Our results have shown that selective
acknowledgments work well together with any congestion
control mechanism and that some combinations of error
detection and congestion control suffer from a high
amount of unnecessary retransmissions. Consequently we
propose a solution to this problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chitre:2000:IBS,
author = "Vikrant A. Chitre and John N. Daigle",
title = "{IP}-based services over {GPRS}",
journal = j-SIGMETRICS,
volume = "28",
number = "3",
pages = "39--47",
month = dec,
year = "2000",
CODEN = "????",
DOI = "https://doi.org/10.1145/377616.377624",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:33:59 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The utility of mobile computing in the future will be
determined to a large degree by the quality of service
achievable over cellular based systems. In this paper,
we examine the traffic-handling capabilities of General
Packet Radio Service (GPRS) with respect to supporting
IP-based Internet services. We begin with an overview
of GPRS. We then present an analytical model to assess
throughput of the reverse link as a function of the
number of users connected and the distribution of user
message lengths for a scenario in which users are
continuously backlogged. Next, we investigate the
capability of GPRS to support World Wide Web access
using a modified version of the analytical model.
Specifically, we present a realistic scenario for user
sessions operating under the Hypertext Transfer
Protocol (HTTP), and we assess the transaction-handling
capabilities as a function of the number of user
sessions, taking into account network delays, forward
link transmission, random access delay, and other
factors. We also consider a scenario where both
continuously backlogged users and users operating HTTP
sessions are present. We conclude with a discussion of
some open issues in the design of GPRS based Internet
access.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cellular data service; IP over wireless; performance;
queues with contention",
}
@Article{Squillante:2001:SIWa,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA 2000)}",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "2--2",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2001:JPU,
author = "Mor Harchol-Balter",
title = "Job placement with unknown duration and no
preemption",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "3--5",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544399",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2001:OPT,
author = "Leana Golubchik and John C. S. Lui",
title = "Open problems for threshold-based systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "6--8",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2001:TPS,
author = "E. G. {Coffman, Jr.} and Predrag Jelenkovi{\'c}",
title = "Threshold policies for single-resource reservation
systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "9--10",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Requests for a resource arrive at rate $ \lambda $,
each request specifying a future time interval, called
a {\em reservation interval}, to be booked for its use
of the resource. The {\em advance notices\/} (delays
before reservation intervals are to begin) are
independent and drawn from a distribution $ A(z) $. The
durations of reservation intervals are sampled from the
distribution $ B(z) $ and are independent of each other
and the advance notices. We let $A$ and $B$ denote
random variables with the distributions $ A(z)$ and $
B(z)$ (the functional notation will always allow one to
distinguish between our two uses of the symbols $A$ and
$B$).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolf:2001:LBC,
author = "Joel L. Wolf and Philip S. Yu",
title = "Load balancing for clustered {Web} farms",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "11--13",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a scheme which attempts to optimally
balance the load on the servers of a clustered web
farm. Solving this performance problem is crucial to
achieving minimal average response time for customer
requests, and thus ultimately to achieving maximal
customer throughput. This short paper gives an overview
of three new mathematical contributions. First, we
describe a {\em goal setting\/} algorithm to determine
the load on each server which minimizes the average
customer request response time given the possibly
overlapping cluster assignments of sites to servers and
the current customer request load for each site. The
cluster assignments, which of necessity can only be
changed relatively infrequently, have a major effect on
the optimal response time in the goal setting
component. So, second, we describe a {\em static\/}
algorithm which determines good assignments of sites to
servers. Third, and finally, we describe a {\em
dynamic\/} algorithm which handles the real-time server
load balancing, reacting to the fluctuating customer
request load in order to come as close as possible to
achieving the idealized optimal average response time.
We examine the performance of the overall load
balancing scheme via simulation experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:2001:TAA,
author = "Edmundo {de Souza e Silva} and Rosa M. M. Le{\~a}o and
Morganna C. Diniz",
title = "Transient analysis applied to traffic modeling",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "14--16",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544403",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic modeling has been an extensive area of
research in the last few years, and a lot of modeling
effort has been devoted to better understand the issues
involved in multiplexing traffic over high speed links.
The goals of the performance analyst include the
development of accurate traffic models to predict, with
sufficient accuracy, the impact of the traffic
generated by applications over the network resources,
and the evaluation of the quality of service (QoS)
being achieved. Performance studies include determining
buffer behavior, evaluate cell loss probability,
admission control algorithms, and many others. One
performance study issue is the calculation of {\em
descriptors\/} from different traffic models. In the
literature, one can find a large number of models that
have been proposed, including Markov and non-Markovian
models [1]. Although not possessing the long-range
dependence property, Markov models are still attractive
not only due to their mathematical tractability but
also because it has been shown that long-range
correlations can be approximately obtained from certain
kinds of Markovian models (e.g. [11]). Furthermore,
works such as [8] show that Markov models can be used
to accurately predict performance metrics. Once a set
of traffic models is chosen, the modeler should obtain
the desired performance measures. Hopefully the
measures should be calculated analytically using
efficient algorithms. The modeling steps briefly
outlined above may require the transient analysis of
general Markovian models, including Markov reward
models. One of the goals of this work is to present new
algorithms we developed to obtain efficiently measures
such as the transient queue length distribution (and
from that, the packet loss ratio as a function of time)
directly from the model of the source feeding the
queue. We also obtain second order descriptors such as
the index of dispersion and the autocovariance from the
source models. Using these algorithms the modeler can
evaluate the efficacy of different Markovian models to
predict performance metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bu:2001:FPAa,
author = "T. Bu and D. Towsley",
title = "A fixed point approximation of {TCP} behavior in a
network",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "17--18",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2001:LDA,
author = "Cheng-Shang Chang and Yuh-ming Chiu and Wheyming Tina
Song",
title = "Large deviation analysis for multiplexing independent
regulated inputs",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "19--21",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the performance analysis
problem for a work conserving link with a large number
of independent regulated inputs. For such a problem, we
derive simple stochastic bounds under a general traffic
constraint for the inputs. The bound for queue length
is shown to be a stochastic extension of the
deterministic worst case bound and it is asymptotically
tighter than the bound in Kesidis and Konstantopoulos
[5]. We also test the bound by considering periodic
inputs with independent starting phases. Based on
importance sampling, we propose a fast simulation
algorithm that achieves significant variance reduction.
                 The simulation results are compared with our
stochastic bound and the bound [5].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kuang:2001:CSA,
author = "Lei Kuang and Armand M. Makowski",
title = "Convex stability and asymptotic convex ordering for
non-stationary arrival processes",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "22--23",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The notion of convex stability for a sequence of
non-negative random variables is discussed in the
context of several applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2001:RRM,
author = "Eitan Bachmat",
title = "Recent results in mathematical modeling and
performance evaluation of disks and disk array",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "24--26",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the seventies and eighties extensive work on
mathematical modeling and performance evaluation of
disks and disk arrays was carried out. The main tools
were stochastic and combinatorial analysis. For the
combinatorial approach led by C. K. Wong and his
                 collaborators the reader is urged to consult [11]. For
the stochastic approach led by E. G. Coffman and his
collaborators one should consult [3]. Both references
provide rather extensive bibliographies. In the late
eighties and the nineties with the coming of the RAID
`revolution', most of the work in the area has become
rather heuristic in nature, see [5] for a survey, with
a few notable exceptions. In this abstract we would
like to report on two recent results which relate
performance and modeling issues in disks and disk
arrays to the theory of metric spaces and the theory of
graph evolution and phase transition. We hope this will
revive the spirit of the work done in the seventies and
eighties (in other walks of life this may not be
                 advisable). The results are taken from [2] and [4].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hogstedt:2001:GCA,
author = "Karin Hogstedt and Doug Kimelman and V. T. Rajan and
Tova Roth and Mark Wegman",
title = "Graph cutting algorithms for distributed applications
partitioning",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "27--29",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of optimally allocating the components of
a distributed program over several machines can be
shown to reduce to a multi-terminal graph cutting
                 problem. In case of three or more terminals, this
problem has been shown to be NP-hard. This paper
introduces a number of heuristic graph algorithms for
use in partitioning distributed object applications ---
that is, for deciding which objects should be placed on
which machines in order to minimize communication and
achieve best overall performance of the application.
These heuristics are particularly effective for graphs
with characteristics specific to representative
distributed object applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fernandes:2001:TSL,
author = "Paulo Fernandes and Brigitte Plateau",
title = "Triangular solution of linear systems in tensor
product format",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "30--32",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an algorithm to solve linear
systems expressed by a matrix stored in a tensor
product format. The proposed solution is based on a LU
decomposition of the matrix keeping the tensor product
structure. It is shown that the complexity of the
decomposition is negligible and the backward and
forward substitutions are no more complex than two
standard vector-matrices multiplications. Finally,
applications of the proposed algorithm and the
comparison with other similar techniques are
discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Capra:2001:UPS,
author = "L. Capra and C. Dutheillet and G. Franceschinis and J.
M. Ili{\'e}",
title = "On the use of partial symmetries for lumping {Markov}
chains",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "33--35",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper a method is proposed, to exploit
partially symmetric behavior of systems for efficient
performance evaluation. The method works on performance
models described with the Stochastic Well-Formed Nets
(SWN) formalism: it allows to automatically discover
partial symmetries in the model behavior, and directly
derive a lumped Markov chain from it, suitable for
performance analysis purposes. With respect to previous
works on automatic exploitation of symmetries in SWNs,
the proposed approach allows a significantly higher
reduction of the state space size in many practical
cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haas:2001:EDN,
author = "Peter J. Haas",
title = "Estimation of delays in non-regenerative
discrete-event stochastic systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "36--38",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544411",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2001:DSC,
author = "David Gamarnik",
title = "On deciding stability of constrained random walks and
queueing systems",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "39--40",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544412",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider in this paper two types of queueing
systems which operate under a specific and fixed
scheduling policy. The first system consists of a
single server and several buffers in which arriving
jobs are stored. We assume that arriving parts may
require several stages of processing in which case each
stage corresponds to a different buffer. The second
system is a communication type queueing network given
by a graph. The arriving jobs (packets) request a
simple path along which they need to be processed. In
both models the jobs arrive in a completely
deterministic fashion: the interarrival times are fixed
and known. All the processing times are also
deterministic. A scheduling policy specifies a rule
using which arriving parts are processed in the
queueing system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2001:AQU,
author = "Mark S. Squillante and Baffelly Woo and Li Zhang",
title = "Analysis of queues under correlated arrivals with
applications to {Web} server performance",
journal = j-SIGMETRICS,
volume = "28",
number = "4",
pages = "41--43",
month = mar,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/544397.544413",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:13 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of many high-volume Web sites often
makes it difficult to mathematically analyze various
performance measures. Since these complex behaviors can
have a significant impact on performance, it is
important to capture them in sufficient detail in the
analysis of the corresponding queueing systems. We
consider the access logs from a particular class of
high-volume Web sites serving dynamic content to obtain
a better understanding of the complexities of user
request patterns in such environments. Our analysis
demonstrates that these arrival patterns exhibit strong
dependence structures which can be accurately
represented by an arrival process with strong
(short-range) correlations, at least for the class of
Web sites motivating our study [2]. Based on these
results, we develop a methodology for approximating
this class of dependent arrival processes by a set of
phase-type distributions. Our approach consists of
formulating and solving a nonlinear optimization
problem that fits a set of dependent stochastic models
to approximate the interarrival time patterns from the
data, which includes matching the autocorrelation
function. To evaluate the effectiveness of our
approach, we conduct a large number of statistical
tests and experiments showing that our methodology
provides an excellent match between the real user
request data and the fitted approximate arrival
process. Given this dependent arrival process as input,
we then derive an exact matrix-analytic analysis of a
general multi-server queue under two server queueing
disciplines. This analysis yields results that provide
significant reductions in the numerical computation
required to solve the queueing models. To demonstrate
the accuracy of the performance measures obtained under
these methods, a large number of experiments were
performed and detailed comparisons were made between
the sojourn time measures from our analysis and the
corresponding measures obtained from simulation of the
queueing system under the actual user request data.
These results show both sets of performance measures to
be in excellent agreement, with relative errors
consistently less than 5\%, and further demonstrate the
robustness of our approach. We also conduct a set of
numerical experiments that exploit our matrix-analytic
analysis and its computational efficiency, which are
then used to establish some important results for
multi-server queues under dependent arrival processes.
This includes the notion of effective stability where
the point at which the mean sojourn time of the queue
exceeds a large constant (e.g., 1000) multiplied by the
mean service time occurs well before the theoretical
stability condition for the queue. Due to space
limitations, we simply summarize a subset of our
results in this extended abstract. We refer the
interested reader to [1] for additional details,
references and results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Narlikar:2001:PMF,
author = "Girija Narlikar and Francis Zane",
title = "Performance modeling for fast {IP} lookups",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "1--12",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378423",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we examine algorithms and data
structures for the longest prefix match operation
required for routing IP packets. Previous work, aimed
at hardware implementations, has focused on quantifying
worst case lookup time and memory usage. With the
advent of fast programmable platforms, whether network
processor or PC-based, metrics which look instead at
average case behavior and memory cache performance
become more important. To address this, we consider a
family of data structures capturing the important
techniques used in known fast IP lookup schemes. For
these data structures, we construct a model which,
given an input trace, estimates cache miss rates and
predicts average case lookup performance. This model is
validated using traces with varying characteristics.
Using the model, we then choose the best data structure
from this family for particular hardware platforms and
input traces; we find that the optimal data structure
differs in different settings. The model can also be
used to select the appropriate hardware configurations
for future lookup engines. The lookup performance of
the selected data structures is competitive with the
fastest available software implementations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qie:2001:SCS,
author = "Xiaohu Qie and Andy Bavier and Larry Peterson and
Scott Karlin",
title = "Scheduling computations on a software-based router",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "13--24",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378425",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent efforts to add new services to the Internet
have increased the interest in software-based routers
that are easy to extend and evolve. This paper
describes our experiences implementing a software-based
router, with a particular focus on the main difficulty
we encountered: how to schedule the router's CPU
cycles. The scheduling decision is complicated by the
desire to differentiate the level of service for
different packet flows, which leads to two fundamental
conflicts: (1) assigning processor shares in a way that
keeps the processes along the forwarding path in
balance while meeting QoS promises, and (2) adjusting
the level of batching in a way that minimizes overhead
while meeting QoS promises.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2001:DMP,
author = "Xun Su and Gustavo de Veciana",
title = "Dynamic multi-path routing: asymptotic approximation
and simulations",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "25--36",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378426",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the dynamic multi-path routing
problem. We focus on an operating regime where traffic
flows arrive at and depart from the network in a bursty
fashion, and where the delays involved in link state
advertisement may lead to `synchronization' effects
that adversely impact the performance of dynamic
single-path routing schemes. We start by analyzing a
simple network of parallel links, where the goal is to
minimize the average increase in network congestion on
the time scale of link state advertisements. We
consider an asymptotic regime leading to an
optimization problem permitting closed-form analysis of
the number of links over which dynamic multi-path
routing should be conducted. Based on our analytical
result we examine three types of dynamic routing
schemes, and identify a robust policy, {\em i.e.},
routing the traffic to a set of links with loads within
a factor of the least loaded, that exhibits robust
performance. We then propose a similar policy for mesh
networks and show by simulation some of its desirable
properties. The main results suggest that our proposal
would provide significant performance improvement for
high speed networks carrying bursty traffic flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jones:2001:PRS,
author = "Michael B. Jones and Stefan Saroiu",
title = "Predictability requirements of a soft modem",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "37--49",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378427",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em Soft Modems\/} use the main processor to execute
modem functions traditionally performed by hardware on
the modem card. To function correctly, soft modems
require that ongoing signal processing computations be
performed on the host CPU in a timely manner. Thus,
signal processing is a commonly occurring background
real-time application---one running on systems that
were not designed to support predictable real-time
execution. This paper presents a detailed study of the
performance characteristics and resource requirements
of a popular soft modem. Understanding these
requirements should inform the efforts of those
designing and building operating systems needing to
support soft modems. Furthermore, we believe that the
conclusions of this study also apply to other existing
and upcoming soft devices, such as soft Digital
Subscriber Line (DSL) cards. We conclude that (1)
signal processing in an interrupt handler is not only
unnecessary but also detrimental to the predictability
of other computations in the system and (2) a real-time
scheduler can provide predictability for the soft modem
while minimizing its impact on other computations in
the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CPU scheduling; open real-time system; real-time;
Rialto; Rialto/NT; signal processing; soft devices;
soft modem; Windows 2000; Windows NT",
}
@Article{Lorch:2001:IDV,
author = "Jacob R. Lorch and Alan Jay Smith",
title = "Improving dynamic voltage scaling algorithms with
{PACE}",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "50--61",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378429",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses algorithms for dynamically
varying (scaling) CPU speed and voltage in order to
save energy. Such scaling is useful and effective when
it is immaterial when a task completes, as long as it
meets some deadline. We show how to modify any scaling
algorithm to keep performance the same but minimize
expected energy consumption. We refer to our approach
as PACE (Processor Acceleration to Conserve Energy)
since the resulting schedule increases speed as the
task progresses. Since PACE depends on the probability
distribution of the task's work requirement, we present
methods for estimating this distribution and evaluate
these methods on a variety of real workloads. We also
show how to approximate the optimal schedule with one
that changes speed a limited number of times. Using
PACE causes very little additional overhead, and yields
substantial reductions in CPU energy consumption.
Simulations using real workloads show it reduces the
CPU energy consumption of previously published
algorithms by up to 49.5\%, with an average of 20.6\%,
without any effect on performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vaidyanathan:2001:AIS,
author = "Kalyanaraman Vaidyanathan and Richard E. Harper and
Steven W. Hunter and Kishor S. Trivedi",
title = "Analysis and implementation of software rejuvenation
in cluster systems",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "62--71",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378434",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several recent studies have reported the phenomenon of
`software aging', one in which the state of a software
system degrades with time. This may eventually lead to
performance degradation of the software or crash/hang
failure or both. `Software rejuvenation' is a
pro-active technique aimed to prevent unexpected or
unplanned outages due to aging. The basic idea is to
stop the running software, clean its internal state and
restart it. In this paper, we discuss software
rejuvenation as applied to cluster systems. This is
both an innovative and an efficient way to improve
cluster system availability and productivity. Using
Stochastic Reward Nets (SRNs), we model and analyze
cluster systems which employ software rejuvenation. For
our proposed time-based rejuvenation policy, we
determine the optimal rejuvenation interval based on
system availability and cost. We also introduce a new
rejuvenation policy based on prediction and show that
it can dramatically increase system availability and
reduce downtime cost. These models are very general and
can capture a multitude of cluster system
characteristics, failure behavior and performability
measures, which we are just beginning to explore. We
then briefly describe an implementation of a software
rejuvenation system that performs periodic and
predictive rejuvenation, and show some empirical data
from systems that exhibit aging.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Loh:2001:TSA,
author = "Gabriel Loh",
title = "A time-stamping algorithm for efficient performance
estimation of superscalar processors",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "72--81",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378437",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increasing complexity of modern superscalar
microprocessors makes the evaluation of new designs and
techniques much more difficult. Fast and accurate
methods for simulating program execution on realistic
and hypothetical processor models are of great interest
to many computer architects and compiler writers. There
are many existing techniques, from profile based
runtime estimation to complete cycle-level simulations.
Many researchers choose to sacrifice the speed of
profiling for the accuracy obtainable by cycle-level
simulators. This paper presents a technique that
provides accurate performance predictions, while
avoiding the complexity associated with a complete
processor emulator. The approach augments a fast
in-order simulator with a time-stamping algorithm that
provides a very good estimate of program running time.
This algorithm achieves an average accuracy that is
within 7.5\% of a cycle-level out-of-order simulator in
approximately 41\% of the running time on the eight
SPECInt95 integer benchmarks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2001:IFI,
author = "Thomas Bonald and Laurent Massouli{\'e}",
title = "Impact of fairness on {Internet} performance",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "82--91",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378438",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss the relevance of fairness as a design
objective for congestion control mechanisms in the
Internet. Specifically, we consider a backbone network
shared by a dynamic number of short-lived flows, and
study the impact of bandwidth sharing on network
performance. In particular, we prove that for a broad
class of fair bandwidth allocations, the total number
of flows in progress remains finite if the load of
every link is less than one. We also show that provided
the bandwidth allocation is `sufficiently' fair,
performance is optimal in the sense that the throughput
of the flows is mainly determined by their access rate.
Neither property is guaranteed with unfair bandwidth
allocations, when priority is given to one class of
flow with respect to another. This suggests current
proposals for a differentiated services Internet may
lead to suboptimal utilization of network resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Salamatian:2001:HMM,
author = "Kav{\'e} Salamatian and Sandrine Vaton",
title = "Hidden {Markov} modeling for network communication
channels",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "92--101",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378439",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we perform the statistical analysis of
an Internet communication channel. Our study is based
on a Hidden Markov Model (HMM). The channel switches
between different states; to each state corresponds the
probability that a packet sent by the transmitter will
be lost. The transition between the different states of
the channel is governed by a Markov chain; this Markov
chain is not observed directly, but the received packet
flow provides some probabilistic information about the
current state of the channel, as well as some
information about the parameters of the model. In this
paper we detail some useful algorithms for the
estimation of the channel parameters, and for making
inference about the state of the channel. We discuss
the relevance of the Markov model of the channel; we
also discuss how many states are required to
pertinently model a real communication channel.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active measurement; expectation-maximization; hidden
Markov model; Internet modelling; network state
estimation",
}
@Article{Cao:2001:NIT,
author = "Jin Cao and William S. Cleveland and Dong Lin and Don
X. Sun",
title = "On the nonstationarity of {Internet} traffic",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "102--112",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378440",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic variables on an uncongested Internet wire
exhibit a pervasive nonstationarity. As the rate of new
TCP connections increases, arrival processes (packet
and connection) tend locally toward Poisson, and time
series variables (packet sizes, transferred file sizes,
and connection round-trip times) tend locally toward
independent. The cause of the nonstationarity is
superposition: the intermingling of sequences of
connections between different source-destination pairs,
and the intermingling of sequences of packets from
different connections. We show this empirically by
extensive study of packet traces for nine links coming
from four packet header databases. We show it
theoretically by invoking the mathematical theory of
point processes and time series. If the connection rate
on a link gets sufficiently high, the variables can be
quite close to Poisson and independent; if major
congestion occurs on the wire before the rate gets
sufficiently high, then the progression toward Poisson
and independent can be arrested for some variables.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hsieh:2001:PCC,
author = "Hung-Yun Hsieh and Raghupathy Sivakumar",
title = "Performance comparison of cellular and multi-hop
wireless networks: a quantitative study",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "113--122",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378441",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the performance trade-offs
between conventional cellular and multi-hop ad-hoc
wireless networks. We compare through simulations the
performance of the two network models in terms of raw
network capacity, end-to-end throughput, end-to-end
delay, power consumption, per-node fairness (for
throughput, delay, and power), and impact of mobility
on the network performance. The simulation results show
that while ad-hoc networks perform better in terms of
throughput, delay, and power, they suffer from
unfairness and poor network performance in the event of
mobility. We discuss the trade-offs involved in the
performance of the two network models, identify the
specific reasons behind them, and argue that the
trade-offs preclude the adoption of either network
model as a clear solution for future wireless
communication systems. Finally, we present a simple
hybrid wireless network model that has the combined
advantages of cellular and ad-hoc wireless networks but
does not suffer from the disadvantages of either.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hegde:2001:BLM,
author = "Nidhi Hegde and Khosrow Sohraby",
title = "Blocking in large mobile cellular networks with bursty
traffic",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "123--132",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378442",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider large cellular networks. The traffic
entering the network is assumed to be correlated in
both {\em space\/} and {\em time.\/} The space
dependency captures the possible correlation between
the arrivals to different nodes in the network, while
the time dependency captures the time correlation
between arrivals to each node. We model such traffic
with a Markov-Modulated Poisson Process (MMPP). It is
shown that even in the single node environment, the
problem is not mathematically tractable. A model with
an infinite number of circuits is used to approximate
the finite model. A novel recursive methodology is
introduced in finding the joint moments of the number
of busy circuits in different cells in the network
leading to accurate determination of blocking
probability. A simple mixed-Poisson distribution is
introduced as an accurate approximation of the
distribution of the number of busy circuits. We show
that for certain cases, in the system with an infinite
number of circuits in each cell, there is no effect of
mobility on the performance of the system. Our
numerical results indicate that the traffic burstiness
has a major impact on the system performance. The
mixed-Poisson approximation is found to be a very good
fit to the exact finite model. The performance of this
approximation using few moments is affected by traffic
burstiness and average load. We find that in a
reasonable range of traffic burstiness, the
mixed-Poisson distribution provides a close
approximation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumar:2001:CEF,
author = "Apurva Kumar and Rajeev Gupta",
title = "Capacity evaluation of frequency hopping based ad-hoc
systems",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "133--142",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378443",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The IEEE 802.15 Wireless Personal Area Networks (WPAN)
study group has been working on evolving a standard for
short-range wireless connectivity between low
complexity and low power devices operating within the
personal operating space (POS). The scenarios
envisioned for WPANs are likely to involve a large
number of POSs operating in an indoor environment.
Among short-range wireless technologies, Bluetooth$^{TM}$
based ad-hoc connectivity comes closest to
satisfying the WPAN requirements. Bluetooth provides a
gross rate of 1 Mbps per network and allows several
such networks to overlap using frequency hopping. The
`aggregate throughput' thus achieved is much higher
than 1 Mbps. In the absence of external interfering
sources, aggregate throughput is limited by self
interference which depends upon, (i) physical layer
parameters like hopping rate, hopping sequences,
transmitted power, receiver sensitivity, modulation,
forward error correction (ii) channel characteristics
like coherence bandwidth and coherence time (iii)
spatial characteristics. In this work we consider the
problem of finding the capacity of Bluetooth based
ad-hoc systems by accurately modeling the Bluetooth
physical layer and the indoor wireless channel. We
predict the throughput in Bluetooth based ad-hoc
systems as a function of a generalized set of
parameters using realistic scenarios and assumptions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ad-hoc networks; bit error rate; Bluetooth technology;
capacity; forward error correction; frequency hopping;
GFSK; throughput",
}
@Article{Qiu:2001:NPF,
author = "Dongyu Qiu and Ness B. Shroff",
title = "A new predictive flow control scheme for efficient
network utilization and {QoS}",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "143--153",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378777",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we develop a new predictive flow control
scheme and analyze its performance. This scheme
controls the non-real-time traffic based on predicting
the real-time traffic. The goal of the work is to
operate the network in a low congestion, high
throughput regime. We provide a rigorous analysis of
the performance of our flow control method and show
that the algorithm has attractive and useful
properties. From our analysis we obtain an explicit
condition that gives us design guidelines on how to
choose a predictor. We learn that it is especially
important to take the queueing effect into account in
developing the predictor. We also provide numerical
results comparing different predictors that use varying
degrees of information from the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Paschalidis:2001:MBE,
author = "Ioannis Ch. Paschalidis and Spyridon Vassilaras",
title = "Model-based estimation of buffer overflow
probabilities from measurements",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "154--163",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378778",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of estimating buffer overflow
probabilities when the statistics of the input traffic
are not known and have to be estimated from
measurements. We start by investigating the use of
Markov-modulated processes in modeling the input
traffic and propose a method for selecting an optimal
model based on Akaike's Information Criterion. We then
consider a queue fed by such a Markov-modulated input
process and use large deviations asymptotics to obtain
the buffer overflow probability. The expression for
this probability is affected by estimation errors in
the parameters of the input model. We analyze the
effect of these errors and propose a new, more robust,
estimator which is less likely to underestimate the
overflow probability than the estimator obtained by
certainty equivalence. As such, it is appropriate in
situations where the overflow probability is associated
with {\em Quality of Service (QoS)\/} and we need to
provide firm QoS guarantees. Nevertheless, as the
number of observations increases, the proposed
estimator converges with probability 1 to the
appropriate target, and thus, does not lead to resource
underutilization in this limit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Akaike's information criterion; effective bandwidth;
estimation; large deviations; Markov-modulated
processes",
}
@Article{Dutta:2001:OTG,
author = "Rudra Dutta and George N. Rouskas",
title = "On optimal traffic grooming in {WDM} rings",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "164--174",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of designing a virtual
topology to minimize electronic routing, that is,
grooming traffic, in wavelength routed optical rings.
We present a new framework consisting of a sequence of
bounds, both upper and lower, in which each successive
bound is at least as strong as the previous one. The
successive bounds take larger amounts of computation to
evaluate, and the number of bounds to be evaluated for
a given problem instance is only limited by the
computational power available. The bounds are based on
decomposing the ring into sets of nodes arranged in a
path, and adopting the locally optimal topology within
each set. Our approach can be applied to many virtual
topology problems on rings. The upper bounds we obtain
also provide a useful series of heuristic solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{LeBoudec:2001:SPV,
author = "Jean-Yves {Le Boudec}",
title = "Some properties of variable length packet shapers",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "175--183",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378780",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The min-plus theory of greedy shapers has been
developed after Cruz's results on the calculus of
network delays. An example of greedy shaper is the
buffered leaky bucket controller. The theory of greedy
shapers establishes a number of properties; for
example, re-shaping keeps original arrival constraints.
The existing theory applies in all rigor either to
fluid systems, or to packets of constant size such as
ATM. For variable length packets, the distortion
introduced by packetization affects the theory, which
is no longer valid. Chang has introduced the concept of
packetizer, which models the effect of variable length
packets, and has also developed a max-plus theory of
shapers. In this paper, we start with the min-plus
theory, and obtain results on greedy shapers for
variable length packets which are not readily explained
with the max-plus theory of Chang. We show a
fundamental result, namely, the min-plus representation
of a packetized greedy shaper. This allows us to prove
that, under some assumptions, re-shaping a flow of
variable length packets does keep original arrival
constraints. However, we show on some examples that if
the assumptions are not satisfied, then the property
may not hold any more. We also demonstrate the
equivalence of implementing a buffered leaky bucket
controller based on either virtual finish times or on
bucket replenishment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "leaky bucket; min-plus algebra; network calculus;
shaper",
}
@Article{Chang:2001:PMI,
author = "Cheng-Shang Chang and Yuh-ming Chiu and Wheyming Tina
Song",
title = "On the performance of multiplexing independent
regulated inputs",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "184--193",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the performance analysis
problem for a work conserving link with a large number
of independent regulated inputs. For such a problem, we
derive simple stochastic bounds under a general traffic
constraint for the inputs. The bound for queue length
is shown to be a stochastic extension of the
deterministic worst case bound and it is asymptotically
tighter than the bound in Kesidis and Konstantopoulos
[23]. We also test the bound by considering periodic
inputs with independent starting phases. Based on
Sanov's theorem and importance sampling, we propose a
fast simulation algorithm that achieves significant
variance reduction. The simulation results are
compared with our stochastic bound and the bound in
[23].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fast simulation; multiplexing; performance bounds",
}
@Article{Shuf:2001:CMB,
author = "Yefim Shuf and Mauricio J. Serrano and Manish Gupta
and Jaswinder Pal Singh",
title = "Characterizing the memory behavior of {Java}
workloads: a structured view and opportunities for
optimizations",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "194--205",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378783",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the memory behavior of important
Java workloads used in benchmarking Java Virtual
Machines (JVMs), based on instrumentation of both
application and library code in a state-of-the-art JVM,
and provides structured information about these
workloads to help guide systems' design. We begin by
characterizing the inherent memory behavior of the
benchmarks, such as information on the breakup of heap
accesses among different categories and on the hotness
of references to fields and methods. We then provide
detailed information about misses in the data TLB and
caches, including the distribution of misses over
different kinds of accesses and over different methods.
In the process, we make interesting discoveries about
TLB behavior and limitations of data prefetching
schemes discussed in the literature in dealing with
pointer-intensive Java codes. Throughout this paper, we
develop a set of recommendations to computer architects
and compiler writers on how to optimize computer
systems and system software to run Java programs more
efficiently. This paper also makes the first attempt to
compare the characteristics of SPECjvm98 to those of a
server-oriented benchmark, pBOB, and explain why the
current set of SPECjvm98 benchmarks may not be adequate
for a comprehensive and objective evaluation of JVMs
and just-in-time (JIT) compilers. We discover that the
fraction of accesses to array elements is quite
significant, demonstrate that the number of `hot spots'
in the benchmarks is small, and show that field
reordering cannot yield significant performance gains.
We also show that even a fairly large L2 data cache is
not effective for many Java benchmarks. We observe that
instructions used to prefetch data into the L2 data
cache are often squashed because of high TLB miss rates
and because the TLB does not usually have the
translation information needed to prefetch the data
into the L2 data cache. We also find that co-allocation
of frequently used method tables can reduce the number
of TLB misses and lower the cost of accessing type
information block entries in virtual method calls and
runtime type checking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sohoni:2001:SMS,
author = "Sohum Sohoni and Rui Min and Zhiyong Xu and Yiming
Hu",
title = "A study of memory system performance of multimedia
applications",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "206--215",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378784",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multimedia applications are fast becoming one of the
dominating workloads for modern computer systems. Since
these applications normally have large data sets and
little data-reuse, many researchers believe that they
have poor memory behavior compared to traditional
programs, and that current cache architectures cannot
handle them well. It is therefore important to
quantitatively characterize the memory behavior of
these applications in order to provide insights for
future design and research of memory systems. However,
very few results on this topic have been published.
This paper presents a comprehensive research on the
memory requirements of a group of programs that are
representative of multimedia applications. These
programs include a subset of the popular MediaBench
suite and several large multimedia programs running on
the Linux, Windows NT and Tru64 UNIX operating systems.
We performed extensive measurement and trace-driven
simulation experiments. We then compared the memory
utilization of these programs to that of SPECint95
applications. We found that multimedia applications
actually have better memory behavior than SPECint95
programs. The high cache hit rates of multimedia
applications can be contributed to the following three
factors. Most multimedia applications apply block
partitioning algorithms to the input data, and work on
small blocks of data that easily fit into the cache.
Secondly, within these blocks, there is significant
data reuse as well as spatial locality. The third
reason is that a large number of references generated
by multimedia applications are to their internal data
structures, which are relatively small and can also
easily fit into reasonably-sized caches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bu:2001:FPAb,
author = "Tian Bu and Don Towsley",
title = "Fixed point approximations for {TCP} behavior in an
{AQM} network",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "216--225",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378786",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we explore the use of fixed point
methods to evaluate the performance of a large
population of TCP flows traversing a network of routers
implementing active queue management (AQM) such as RED
(random early detection). Both AQM routers that drop
and that mark packets are considered along with
infinite and finite duration TCP flows. In the case of
finite duration flows, we restrict ourselves to
networks containing one congested router. In all cases,
we formulate a fixed point problem with the router
average queue lengths as unknowns. Once these are
obtained, other metrics such as router loss
probability, TCP flow throughput, TCP flow end-to-end
loss rates, average round trip time, and average
session duration are easily obtained. Comparison with
simulation for a variety of scenarios shows that the
model is accurate in its predictions (mean errors less
than 5\%). Last, we establish monotonicity properties
exhibited by the solution for a single congested router
that explains several interesting observations, such as
TCP SACK suffers higher loss than TCP Reno.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Low:2001:UTV,
author = "Steven H. Low and Larry Peterson and Limin Wang",
title = "Understanding {TCP Vegas}: a duality model",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "226--235",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378787",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a model of the TCP Vegas
congestion control mechanism as a distributed
optimization algorithm. Doing so has three important
benefits. First, it helps us gain a fundamental
understanding of why TCP Vegas works, and an
appreciation of its limitations. Second, it allows us
to prove that Vegas stabilizes at a weighted
proportionally fair allocation of network capacity when
there is sufficient buffering in the network. Third, it
suggests how we might use explicit feedback to allow
each Vegas source to determine the optimal sending rate
when there is insufficient buffering in the network. We
present simulation results that validate our
conclusions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Talim:2001:CRW,
author = "J. Talim and Z. Liu and Ph. Nain and E. G. {Coffman,
Jr.}",
title = "Controlling the robots of {Web} search engines",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "236--244",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378788",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Robots are deployed by a Web search engine for
collecting information from different Web servers in
order to maintain the currency of its data base of Web
pages. In this paper, we investigate the number of
robots to be used by a search engine so as to maximize
the currency of the data base without putting an
unnecessary load on the network. We adopt a
finite-buffer queueing model to represent the system.
The arrivals to the queueing system are Web pages
brought by the robots; service corresponds to the
indexing of these pages. Good performance requires that
the number of robots, and thus the arrival rate of the
queueing system, be chosen so that the indexing queue
is rarely starved or saturated. Thus, we formulate a
multi-criteria stochastic optimization problem with the
loss rate and empty-buffer probability being the
criteria. We take the common approach of reducing the
problem to one with a single objective that is a linear
function of the given criteria. Both static and dynamic
policies can be considered. In the static setting the
number of robots is held fixed; in the dynamic setting
robots may be re-activated/de-activated as a function
of the state. Under the assumption that arrivals form a
Poisson process and that service times are independent
and exponentially distributed random variables, we
determine an optimal decision rule for the dynamic
setting, i.e., a rule that varies the number of robots
in such a way as to minimize a given linear function of
the loss rate and empty-buffer probability. Our results
are compared with known results for the static case. A
numerical study indicates that substantial gains can be
achieved by dynamically controlling the activity of the
robots.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Markov decision process; queues; web robots; Web
search engines",
}
@Article{Smith:2001:WTI,
author = "F. Donelson Smith and F{\'e}lix Hern{\'a}ndez Campos
and Kevin Jeffay and David Ott",
title = "What {TCP\slash IP} protocol headers can tell us about
the {Web}",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "245--256",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378789",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We report the results of a large-scale empirical study
of web traffic. Our study is based on over 500 GB of
TCP/IP protocol-header traces collected in 1999 and
2000 (approximately one year apart) from the high-speed
link connecting The University of North Carolina at
Chapel Hill to its Internet service provider. We also
use a set of smaller traces from the NLANR repository
taken at approximately the same times for comparison.
The principal results from this study are: (1)
empirical data suitable for constructing traffic
generating models of contemporary web traffic, (2) new
characterizations of TCP connection usage showing the
effects of HTTP protocol improvement, notably
persistent connections ({\em e.g.}, about 50\% of web
objects are now transferred on persistent connections),
and (3) new characterizations of web usage and content
structure that reflect the influences of `banner ads,'
server load balancing, and content distribution. A
novel aspect of this study is a demonstration that a
relatively light-weight methodology based on passive
tracing of only TCP/IP headers and off-line analysis
tools can provide timely, high quality data about web
traffic. We hope this will encourage more researchers
to undertake on-going data collection and provide the
research community with data about the rapidly evolving
characteristics of web traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nahum:2001:EWA,
author = "Erich M. Nahum and Marcel-Catalin Rosu and Srinivasan
Seshan and Jussara Almeida",
title = "The effects of wide-area conditions on {WWW} server
performance",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "257--267",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378790",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "WWW workload generators are used to evaluate web
server performance, and thus have a large impact on
what performance optimizations are applied to servers.
However, current benchmarks ignore a crucial component:
how these servers perform in the environment in which
they are intended to be used, namely the wide-area
Internet. This paper shows how WAN conditions can
affect WWW server performance. We examine these effects
using an experimental test-bed which emulates WAN
characteristics in a live setting, by introducing
factors such as delay and packet loss in a controlled
and reproducible fashion. We study how these factors
interact with the host TCP implementation and what
influence they have on web server performance. We
demonstrate that when more realistic wide-area
conditions are introduced, servers exhibit very
different performance properties and scaling behaviors,
which are not exposed by existing benchmarks running on
LANs. We show that observed throughputs can give
misleading information about server performance, and
thus find that maximum throughput, or capacity, is a
more useful metric. We find that packet losses can
reduce server capacity by as much as 50 percent and
increase response time as seen by the client. We show
that using TCP SACK can reduce client response time,
without reducing server capacity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nain:2001:MMQ,
author = "Philippe Nain and Rudesindo N{\'u}{\~n}ez-Queija",
title = "A {M/M/1} queue in a semi-{Markovian} environment",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "268--278",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378791",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an M/M/1 queue in a semi-Markovian
environment. The environment is modeled by a two-state
semi-Markov process with arbitrary sojourn time
distributions $ F_0 (x) $ and $ F_1 (x) $. When in
state $ i = 0, 1 $, customers are generated according
to a Poisson process with intensity $ \lambda_i $ and
customers are served according to an exponential
distribution with rate $ \mu_i $. Using the theory of
Riemann--Hilbert boundary value problems we compute the
$z$-transform of the queue-length distribution when
either $ F_0 (x)$ or $ F_1 (x)$ has a rational
Laplace--Stieltjes transform and the other may be a
general --- possibly heavy-tailed --- distribution. The
arrival process can be used to model bursty traffic
and/or traffic exhibiting long-range dependence, a
situation which is commonly encountered in networking.
The closed-form results lend themselves for numerical
evaluation of performance measures, in particular the
mean queue-length.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "queueing; bursty traffic; communication networks;
heavy-tailed distribution; long-range dependence;
Riemann--Hilbert boundary value problem; stochastic
modeling",
}
@Article{Bansal:2001:ASS,
author = "Nikhil Bansal and Mor Harchol-Balter",
title = "Analysis of {SRPT} scheduling: investigating
unfairness",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "279--290",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378792",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Shortest-Remaining-Processing-Time (SRPT)
scheduling policy has long been known to be optimal for
minimizing mean response time (sojourn time). Despite
this fact, SRPT scheduling is rarely used in practice.
It is believed that the performance improvements of
SRPT over other scheduling policies stem from the fact
that SRPT unfairly penalizes the large jobs in order to
help the small jobs. This belief has led people to
instead adopt `fair' scheduling policies such as
Processor-Sharing (PS), which produces the same
expected slowdown for jobs of all sizes. This paper
investigates formally the problem of unfairness in SRPT
scheduling as compared with PS scheduling. The analysis
assumes an M/G/1 model, and emphasizes job size
distributions with a heavy-tailed property, as are
characteristic of empirical workloads. The analysis
shows that the degree of unfairness under SRPT is
surprisingly small. The M/G/1/SRPT and M/G/1/PS queues
are also analyzed under overload and closed-form
expressions for mean response time as a function of job
size are proved in this setting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luthi:2001:IPC,
author = "Johannes L{\"u}thi and Catalina M. Llad{\'o}",
title = "Interval parameters for capturing uncertainties in an
{EJB} performance model",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "291--300",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378794",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Exact as well as approximate analytical solutions for
quantitative performance models of computer systems are
usually obtained by performing a series of arithmetical
operations on the input parameters of the model.
However, especially during early phases of system
design and implementation, not all the parameter values
are usually known exactly. In related research
contributions, intervals have been proposed as a means
to capture parameter uncertainties. Furthermore,
methods to adapt existing solution algorithms to
parameter intervals have been discussed. In this paper
we present the adaptation of an existing performance
model to parameter intervals. The approximate solution
of a queueing network modelling an Enterprise JavaBeans
server implementation is adapted to interval arithmetic
in order to represent the uncertainty in some of the
parameters of the model. A new interval splitting
method is applied to obtain reasonably tight
performance measure intervals. Monotonicity properties
of intermediate computation results are exploited to
achieve a more efficient interval solution. In
addition, parts of the original solution algorithm are
modified to increase the efficiency of the
corresponding interval arithmetical solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed systems; enterprise JavaBeans; interval
parameters; parameter uncertainties; performance
models; queueing",
}
@Article{El-Sayed:2001:ASS,
author = "Hesham El-Sayed and Don Cameron and Murray Woodside",
title = "Automation support for software performance
engineering",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "301--311",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378799",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To evaluate the performance of a software design one
must create a model of the software, together with the
execution platform and configuration. Assuming that the
`platform': (processors, networks, and operating
systems) are specified by the designer, a good
`configuration' (the allocation of tasks to processors,
priorities, and other aspects of the installation) must
be determined. Finding one may be a barrier to rapid
evaluation; it is a more serious barrier if there are
many platforms to be considered. This paper describes
an automated heuristic procedure for configuring a
software system described by a layered architectural
software model, onto a set of processors, and choosing
priorities. The procedure attempts to meet a
soft-real-time performance specification, in which any
number of scenarios have deadlines which must be
realized some percentage of the time. It has been
successful in configuring large systems with both soft
and hard deadlines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bradshaw:2001:PBP,
author = "Michael K. Bradshaw and Bing Wang and Subhabrata Sen
and Lixin Gao and Jim Kurose and Prashant Shenoy and
Don Towsley",
title = "Periodic broadcast and patching services:
implementation, measurement, and analysis in an
{Internet} streaming video testbed",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "312--313",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378801",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2001:TSR,
author = "Yang Richard Yang and Xiaozhou Li and Simon S. Lam and
Xincheng Zhang",
title = "Towards scalable and reliable group key management",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "314--315",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bremler-Barr:2001:RPC,
author = "Anat Bremler-Barr and Yehuda Afek and Haim Kaplan and
Edith Cohen and Michael Merritt",
title = "Restoration path concatenation: fast recovery of
{MPLS} paths",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "316--317",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new general theory about {\em restoration\/} of
network paths is first introduced. The theory pertains
to restoration of shortest paths in a network following
failure, e.g., we prove that a shortest path in a
network after removing $k$ edges is the concatenation
of at most $k$ + 1 shortest paths in the original
network. The theory is then combined with efficient
path concatenation techniques in MPLS (multi-protocol
label switching), to achieve powerful schemes for
restoration in MPLS based networks. We thus transform
MPLS into a flexible and robust method for forwarding
packets in a network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Savvides:2001:MNW,
author = "Andreas Savvides and Sung Park and Mani B.
Srivastava",
title = "On modeling networks of wireless microsensors",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "318--319",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378808",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsigas:2001:EPN,
author = "Philippas Tsigas and Yi Zhang",
title = "Evaluating the performance of non-blocking
synchronization on shared-memory multiprocessors",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "320--321",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378810",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel programs running on shared memory
multiprocessors coordinate via shared data
objects/structures. To ensure the consistency of the
shared data structures, programs typically rely on some
forms of software synchronisations. Unfortunately
typical software synchronisation mechanisms usually
result in poor performance because they produce large
amounts of memory and interconnection network
contention and, more significantly, because they
produce convoy effects that degrade significantly in
multiprogramming environments: if one process holding a
lock is preempted, other processes on different
processors waiting for the lock will not be able to
proceed. Researchers have introduced non-blocking
synchronisation to address the above problems.
Non-blocking implementations allow multiple tasks to
access a shared object at the same time, but without
enforcing mutual exclusion to accomplish this. However,
its performance implications are not well understood on
modern systems or on real applications. In this paper
we study the impact of the non-blocking synchronisation
on parallel applications running on top of a modern, 64
processor, cache-coherent, shared memory multiprocessor
system: the SGI Origin 2000. Cache-coherent non-uniform
memory access (ccNUMA) shared memory multiprocessor
systems have attracted considerable research and
commercial interest in the last years. In addition to
the performance results on a modern system, we also
investigate the key synchronisation schemes that are
used in multiprocessor applications and their efficient
transformation to non-blocking ones. Evaluating the
impact of the synchronisation performance on
applications is important for several reasons. First,
micro-benchmarks can not capture every aspect of
primitive performance. It is hard to predict the
primitive impact on the application performance. For
example, a lock or barrier that generates a lot of
additional network traffic might have little impact on
applications. Second, even in applications that spend
significant time in synchronisation operations, the
synchronisation time might be dominated by wait time
due to load imbalance and lock serialisation in the
application, which better implementations of
synchronisation may not be helpful in reducing. Third,
micro-benchmarks rarely capture (generate) scenarios
that occur in real applications.\par
We evaluated the benefits of non-blocking
synchronisation in a range of applications running on
top of modern realizations of shared-memory
multiprocessors, a 64 processor SGI Origin 2000. In
this evaluation, (i) we used a big set of applications
with different communication characteristics, making
sure that we include also applications that do not
spend a lot of time in synchronisation, (ii) we also
modified all the lock-based synchronisation points of
these applications when possible. The goal of our work
was to provide an in depth understanding of how
non-blocking can improve the performance of modern
parallel applications. More specifically, the main
issues addressed in this paper include: (i) The
architectural implications of the ccNUMA on the design
of non-blocking synchronisation. (ii) The
identification of the basic locking operations that
parallel programmers use in their applications. (iii)
The efficient non-blocking implementation of these
synchronisation operations. (iv) The experimental
comparison of the lock-based and lock-free versions of
the respective applications on a cache-coherent
non-uniform memory access shared memory multiprocessor
system. (v) The identification of the structural
differences between applications that benefit more from
non-blocking synchronisation than others. We selected
to examine these issues, on a 64 processor SGI Origin
2000 multiprocessor system. This machine is attractive
for the study because it provides an aggressive
communication architecture and support for both in
cache and at memory synchronisation primitives. It
should be clear however that the conclusions and the
methods presented in this paper have general
applicability in other realizations of cache-coherent
non-uniform memory access machines. Our results can
benefit the parallel programmers in two ways. First, to
understand the benefits of non-blocking
synchronisation, and then to transform some typical
lock-based synchronisation operations that are probably
used in their programs to non-blocking ones by using
the general translations that we provide in this
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ng:2001:OHP,
author = "Wee Teck Ng and Bruce K. Hillyer",
title = "Obtaining high performance for storage outsourcing",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "322--323",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378813",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The viability of storage outsourcing is critically
dependent on the access performance of remote storage.
We study this issue by measuring the behavior of a
broad variety of I/O-intensive benchmarks as they
access remote storage over an IP network. We measure
the effect of network latencies that correspond to
distances ranging from a local neighborhood to halfway
across a continent. We then measure the effect of
latency-hiding mechanisms. Our results indicate that,
in many cases, the adverse effects of network delay can
be rendered inconsequential by clever file system and
operating system techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Padamanabban:2001:DGL,
author = "Venkata N. Padmanabhan and Lakshminarayanan
Subramanian",
title = "Determining the geographic location of {Internet}
hosts",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "324--325",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378814",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of determining the geographic
location of an Internet host knowing only its IP
address. We have developed three distinct techniques,
{\em GeoTrack}, {\em GeoPing}, and {\em GeoCluster}, to
address this problem. These techniques exploit
information derived from the DNS, network delay
measurements, and inter-domain routing. We have
evaluated our techniques using extensive and varied
datasets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mandjes:2001:LCA,
author = "Michel Mandjes and Iraj Saniee and Alexander Stolyar",
title = "Load characterization and anomaly detection for voice
over {IP} traffic",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "326--327",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of traffic anomaly detection
in IP networks. Traffic anomalies arise when there is
overload due to failures in a network. We present
general formulae for the variance of the cumulative
traffic over a fixed time interval and show how the
derived analytical expression simplifies for the case
of voice over IP traffic, the focus of this paper. To
detect load anomalies, we show it is sufficient to
consider cumulative traffic over relatively long
intervals such as 5 minutes. This approach
substantially extends the current practice in IP
network management where only the first order
statistics and fixed thresholds are used to identify
abnormal behavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "SNMP-based load characterization; variance estimation;
VoIP traffic anomaly detection",
}
@Article{Downey:2001:SCF,
author = "Allen B. Downey",
title = "The structural cause of file size distributions",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "328--329",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a user model that explains the shape of the
distribution of file sizes in local file systems and in
the World Wide Web. We examine evidence from 562 file
systems, 38 web clients and 6 web servers, and find
that the model is a good description of these systems.
These results cast doubt on the widespread view that
the distribution of file sizes is long-tailed and that
long-tailed distributions are the cause of
self-similarity in the Internet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file sizes; long-tailed distributions;
self-similarity",
}
@Article{Bhargava:2001:UAM,
author = "Rishi Bhargava and Ashish Goel and Adam Meyerson",
title = "Using approximate majorization to characterize
protocol fairness",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "330--331",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378826",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mellor-Crummey:2001:PUI,
author = "John Mellor-Crummey and Robert Fowler and David
Whalley",
title = "On providing useful information for analyzing and
tuning applications",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "332--333",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Application performance tuning is a complex process
that requires correlating many types of information
with source code to locate and analyze performance
problems bottle-necks. Existing performance tools don't
adequately support this process in one or more
dimensions. We describe two performance tools, {\em
MHsim\/} and {\em HPCView}, that we built to support
our own work on data layout and optimizing compilers.
Both tools report their results in scope-hierarchy
views of the corresponding source code and produce
their output as HTML databases that can be analyzed
portably and collaboratively using a commodity
browser.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shahabi:2001:ATE,
author = "Cyrus Shahabi and Mohammad R. Kolahdouzan and Greg
Barish and Roger Zimmermann and Didi Yao and Kun Fu and
Lingling Zhang",
title = "Alternative techniques for the efficient acquisition
of haptic data",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "334--335",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Immersive environments are those that surround users
in an artificial world. These environments consist of a
composition of various types of immersidata: unique
data types that are combined to render a virtual
experience. Acquisition, for storage and future
querying, of information describing sessions in these
environments is challenging because of the real-time
demands and sizable amounts of data to be managed. In
this paper, we summarize a comparison of techniques for
achieving the efficient acquisition of one type of
immersidata, the haptic data type, which describes the
movement, rotation, and force associated with
user-directed objects in an immersive environment. In
addition to describing a general process for real-time
sampling and recording of this type of data, we propose
three distinct sampling strategies: fixed, grouped, and
adaptive. We conducted several experiments with a real
haptic device and found that there are tradeoffs
between the accuracy, efficiency, and complexity of
implementation for each of the proposed techniques.
While it is possible to use any of these approaches for
real-time haptic data acquisition, we found that an
adaptive sampling strategy provided the most efficiency
without significant loss in accuracy. As immersive
environments become more complex and contain more
haptic sensors, techniques such as adaptive sampling
can be useful for improving scalability of real-time
data acquisition.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "haptic data acquisition; immersidata; immersive
technologies; sampling",
}
@Article{Dinda:2001:OPR,
author = "Peter A. Dinda",
title = "Online prediction of the running time of tasks",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "336--337",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378836",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Almeida:2001:ARB,
author = "Virgil{\'\i}o Almeida and Daniel Menasc{\'e} and
Rudolf Riedi and Fl{\'a}via Peligrinelli and Rodrigo
Fonseca and Wagner {Meira, Jr.}",
title = "Analyzing robot behavior in e-business sites",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "338--339",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378838",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Almeida:2001:CUA,
author = "Jussara M. Almeida and Jeffrey Krueger and Mary K.
Vernon",
title = "Characterization of user access to streaming media
files",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "340--341",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2001:PME,
author = "Thomas Bonald and James Roberts",
title = "Performance modeling of elastic traffic in overload",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "342--343",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/378420.378845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While providers generally aim to avoid congestion by
adequate provisioning, overload can clearly occur on
certain network links. In this paper we propose some
simple preliminary models for an overloaded link
accounting for user impatience and reattempt
behavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qiu:2001:FFI,
author = "Lili Qiu and George Varghese and Subhash Suri",
title = "Fast firewall implementations for software-based and
hardware-based routers",
journal = j-SIGMETRICS,
volume = "29",
number = "1",
pages = "344--345",
month = jun,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/384268.378849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:34:55 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Routers must perform packet classification at high
speeds to efficiently implement functions such as
firewalls and diffserv. Classification can be based on
an arbitrary number of fields in the packet header.
Performing classification quickly on an arbitrary
number of fields is known to be difficult, and has poor
worst-case complexity. In this paper, we re-examine two
basic mechanisms that have been dismissed in the
literature as being too inefficient: backtracking
search and set pruning tries. We find using real
databases that the time for backtracking search is much
better than the worst-case bound; instead of $ \Omega
((\log N)^{k - 1}) $, the search time is only roughly
twice the optimal search time. Similarly, we find that
set pruning tries (using a DAG optimization) have much
better storage costs than the worst-case bound. We also
propose several new techniques to further improve the
two basic mechanisms. Our major ideas are (i)
backtracking search on a small memory budget, (ii) a
novel compression algorithm, (iii) pipelining the
search, (iv) the ability to trade-off smoothly between
backtracking and set pruning, and (v) algorithms to
effectively make use of hardware if hardware is
available. We quantify the performance gain of each
technique using real databases. We show that on real
firewall databases our schemes, with the accompanying
optimizations, are close to optimal in time and
storage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:2001:CRT,
author = "K. Kant and Prasant Mohapatra",
title = "Current research trends in {Internet} servers",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "5--7",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572318",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dalal:2001:OSO,
author = "Amy Csizmar Dalal and Scott Jordan",
title = "An optimal service ordering for a {World Wide Web}
server",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "8--13",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider alternative service policies in a web
server with impatient users. User-perceived performance
is modeled as an exponentially decaying function of the
user's waiting time, reflecting the probability that
the user aborts the download before the page is
completely received. The web server is modeled as a
single server queue, with Poisson arrivals and
exponentially distributed file lengths. The server
objective is to maximize average revenue per unit time,
where each user is assumed to pay a reward proportional
to the perceived performance. When file lengths are
i.i.d., we prove that the optimal service policy is
greedy, namely that the server should choose the job
with the highest potential reward. However, when file
lengths are independently drawn from a set of
exponential distributions, we show the optimal policy
need not be greedy; in fact, processor sharing policies
sometimes outperform the best greedy policy in this
case.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cardellini:2001:WSS,
author = "Valeria Cardellini and Emiliano Casalicchio and
Michele Colajanni and Marco Mambelli",
title = "{Web} switch support for differentiated services",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "14--19",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572320",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the Web is becoming a medium widely used as a
preferential channel for critical information exchange,
business, and e-commerce, it is necessary to enable
differentiated service mechanisms not only at the
network but also at the Web server level. In this
paper, we propose the concept of {\em Quality of Web
Services\/} (QoWS), which is inspired by the basic
principles of network QoS, while looking at the server
components of the Web system. In particular, we analyze
how QoWS principles can be realized in a Web site
hosted on a Web-server cluster, that is, an architecture
composed by multiple Web servers locally distributed
and a single front-end node, called a Web switch. We
propose a new centralized policy, namely {\em
DynamicPartitioning}, which satisfies through dynamic
server partition all basic QoS principles for a Web
switch working at application level. We compare it
against other proposed classes of policies which
implement part or all of basic QoS principles. We
demonstrate through a large set of simulation
experiments under a realistic workload model that
DynamicPartitioning always achieves superior
performance for the high service class, at the price of
some penalty for low service classes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed systems; load sharing; performance
evaluation; quality of service",
}
@Article{Voigt:2001:KBC,
author = "Thiemo Voigt and Per Gunningberg",
title = "Kernel-based control of persistent {Web} server
connections",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "20--25",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several overload admission control architectures have
been developed to protect web servers from overload.
Some of these architectures base their admission
decision on information found in the HTTP header. In
this context, persistent connections represent a
challenging problem since the HTTP header of the first
request does not reveal any information about the
resource consumption of the requests that might follow
on the same connection. In this paper, we present an
architecture that prevents uncontrollable server
overload caused by persistent connections. We evaluate
our approach by various experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2001:BPI,
author = "Jun Wang and Rui Min and Zhuying Wu and Yiming Hu",
title = "Boosting {I/O} performance of {Internet} servers with
user-level custom file systems",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "26--31",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous studies have shown that disk I/O times are
one of the major performance bottlenecks of Internet
servers such as proxy cache servers. Most conventional
file systems do not work well for such systems because
of their very high overheads. Although special-purpose
operating systems may achieve high performance, they are
very difficult and expensive to design and maintain.
They also have very poor portability. In this paper we
propose to build user-space, customized file systems
for Internet servers so as to achieve high-performance,
low-implementation-cost and good portability at the
same time. To provide an example of such systems, we
presented a novel scheme called {\em WPSFS\/} that can
drastically improve I/O performance of proxy servers
and other applications. WPSFS is an application-level
software component of a proxy server which manages data
on a raw disk or disk partition. Since the entire
system runs in the user space, it is easy and
inexpensive to implement. It also has good portability
and maintainability. With efficient in-memory meta-data
data structures and a novel file system called {\em
Page-structured file system (PFS)}, WPSFS achieves 9--20
times better I/O performance than the state-of-the-art
SQUID server running on a Unix Fast File System, and
4--10 times better than the improved SQUIDML.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2001:CDP,
author = "Xin Chen and Xiaodong Zhang",
title = "Coordinated data prefetching by utilizing reference
information at both proxy and {Web} servers",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "32--38",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Existing prefetching techniques rely on server-based,
proxy-based, or client-based reference access
information. Although Web servers may provide accurate
access information, our studies show that significant
communication overhead can be involved by sending
unnecessary reference information to clients or/and
proxy servers. Our study also shows that prediction
accuracy of proxy-based prefetching can be
significantly limited without input of Web servers. We
propose a {\em coordinated proxy-server prefetching
technique\/} that adaptively utilizes the reference
information and coordinates prefetching activities at
both proxy and web servers. In our design, the
reference access information stored in proxy servers
will be the main source serving data prefetching for
groups of clients, each of whom shares the common
surfing interests. The access information in the web
server will be used to serve data prefetching only for
data objects that are not qualified for proxy-based
prefetching. Conducting trace-driven simulations, we
show that both hit ratios and byte hit ratios
contributed from coordinated proxy-server prefetching
are up to 88\% higher than that from proxy-based
prefetching, and they are comparable to the ratios from
server-based prefetching with a difference of 5\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardaiz:2001:IST,
author = "Oscar Ardaiz and Felix Freitag and Leandro Navarro",
title = "Improving the service time of {Web} clients using
server redirection",
journal = j-SIGMETRICS,
volume = "29",
number = "2",
pages = "39--44",
month = sep,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/572317.572324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes and evaluates experimentally a
web server infrastructure, which consists of a small
number of servers that redirect client requests based
on the estimated client service time. The web servers
have replicated content, are located in geographically
different regions, and redirect clients between
servers. The web servers use metrics obtained from
server logs to estimate the service time of a client.
Based on the estimated service time the server
redirects the web client. The implementation of the
measurement and redirection mechanism is done in the
web servers and is independent of the clients. Using
server logs the measuring mechanism does not introduce
traffic into the network. We have experimentally
evaluated the proposed web server infrastructure. In
our experiments the client service time improved from 4
to 40\% when using the proposed mechanism. The web
server infrastructure could be applied to improve the
service time of selected clients, which frequently
access a web server to retrieve a significant amount of
data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2001:GGI,
author = "Shudong Jin and Azer Bestavros",
title = "{GISMO}: a {Generator of Internet Streaming Media
Objects} and workloads",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "2--10",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507554",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a tool called GISMO (Generator of
Internet Streaming Media Objects and workloads). GISMO
enables the specification of a number of streaming
media access characteristics, including object
popularity, temporal correlation of requests, seasonal
access patterns, user session durations, user
inter-activity times, and variable bit-rate (VBR)
self-similarity and marginal distributions. The
embodiment of these characteristics in GISMO enables
the generation of realistic and scalable request
streams for use in the benchmarking and comparative
evaluation of Internet streaming media delivery
techniques. To demonstrate the usefulness of GISMO, we
present a case study that shows the importance of
various workload characteristics in determining the
effectiveness of proxy caching and server patching
techniques in reducing bandwidth requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2001:SIWb,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA 2001)}",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "11--11",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bansal:2001:AMG,
author = "Nikhil Bansal and Mor Harchol-Balter",
title = "Analysis of {M/G/1/SRPT} under transient overload",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "12--14",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507557",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This short paper contains an approximate analysis for
the M/G/1/SRPT queue under alternating periods of
overload and low load. The result in this paper along
with several other results on systems under transient
overload are contained in our recent technical report
[2].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2001:ACA,
author = "E. Bachmat",
title = "Average case analysis for batched disk scheduling and
increasing subsequences",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "15--16",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riabov:2001:SPT,
author = "Anton Riabov and Jay Sethuraman",
title = "Scheduling periodic task graphs with communication
delays",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "17--18",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of finding an optimal
assignment of tasks, which constitute a parallel
application, to an unlimited number of identical
processors. The precedence constraints among the tasks
are given in the form of a directed acyclic graph
(DAG). We are given processing times for each task and
the communication delays between precedence-constrained
tasks, which are incurred if the corresponding tasks
are executed on different processors. Furthermore, the
system must be able to process real-time periodic input
with a fixed period. This problem occurs, for example,
in multiprocessor scheduling of video processing
applications, where each frame has to be processed by a
number of software filters, and some filters use data
pre-processed by other filters, thus forming a DAG of
data dependencies. We formulate several variants of
this problem, and briefly discuss some of our results
for special precedence graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fourneau:2001:GNR,
author = "Jean-Michel Fourneau and Erol Gelenbe",
title = "{G}-networks with resets",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "19--20",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gelenbe Networks (G-networks) are a class of queuing
models which include new types of customers called
`signals,' which are either `negative customers' and
`triggers' [1, 2]. Queuing networks typically do not
have provisions for some customers being used to
eliminate other customers, or to redirect other
customers among the queues. In other words, customers
in traditional queuing networks cannot exert direct
control on other customers. G-network models overcome
some of these limitations and still preserve the
computationally attractive `product form' property of
certain Markovian queuing networks. In addition to
ordinary customers, G-networks contain `negative
customers' which eliminate normal customers, and
`triggers' which move other customers from some queue
to another [4, 5]. Multiple class versions of these
models are discussed in [7, 8], and in [9] many
additional results are provided. These queuing networks
have generated much interest in the literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shalmon:2001:QAP,
author = "Michael Shalmon",
title = "Queueing analysis for polling and prioritized service
of aggregated regenerative variable rate {ON-OFF}
traffic sources",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "20--20",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507561",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bain:2001:MPD,
author = "Alan Bain and Peter Key",
title = "Modelling the performance of distributed admission
control for adaptive applications",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "21--22",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507562",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2001:LBB,
author = "Cheng-Shang Chang and Duan-Shin Lee and Ching-Ming
Lien",
title = "Load balanced {Birkhoff--von Neumann} switches with
resequencing",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "23--24",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507563",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In [2], we proposed the load balanced Birkhoff--von
Neumann switch with one-stage buffering (see Figure 1).
Such a switch consists of two stages of crossbar
switching fabrics and one stage of buffering. The
buffer at the input port of the second stage uses the
Virtual Output Queueing (VOQ) technique to solve the
problem of head-of-line blocking. In such a switch,
packets are of the same size. Also, time is slotted and
synchronized so that exactly one packet can be
transmitted within a time slot. In a time slot, both
crossbar switches set up connection patterns
corresponding to permutation matrices that are
periodically generated from a one-cycle permutation
matrix.\par
The reasoning behind such a switch architecture is as
follows: since the connection patterns are periodic,
packets from the same input port of the first stage are
distributed in a round-robin fashion to the second
stage according to their arrival times. Thus, the first
stage performs load balancing for the incoming traffic.
As the traffic coming into the second stage is load
balanced, it suffices to use simple periodic connection
patterns to perform switching at the second stage. This
is shown in [2] as a special case of the original
Birkhoff--von Neumann decomposition used in [1]. There
are several advantages of using such an architecture,
including scalability, low hardware complexity, 100\%
throughput, low average delay in heavy load and bursty
traffic, and efficient buffer usage. However, the main
drawback of the load balanced Birkhoff--von Neumann
switch with one-stage buffering is that packets might
be out of sequence.\par
The main objective of this paper is to solve the
out-of-sequence problem that occurs in the load
balanced Birkhoff--von Neumann switch with one-stage
buffering. One quick fix is to add a
resequencing-and-output buffer after the second stage.
However, as packets are distributed according to their
{\em arrival times\/} at the first stage, there is no
guarantee on the size of the resequencing-and-output
buffer to prevent packet losses. For this, one needs to
distribute packets according to their {\em flows}, as
indicated in the paper by Iyer and McKeown [5]. This is
done by adding a flow splitter and a load-balancing
buffer in front of the first stage (see Figure 2). For
an $ N \times N $ switch, the load-balancing buffer at
each input port of the first stage consists of $N$
virtual output queues (VOQ) destined for the $N$ output
ports of that stage. Packets from the same {\em flow\/}
are split in the round-robin fashion to the $N$ virtual
output queues and scheduled under the First Come First
Served (FCFS) policy. By so doing, load balancing can
be achieved for each flow as packets from the same flow
are split almost evenly to the input ports of the
second stage. More importantly, as pointed out in [5],
the delay and the buffer size of the load-balancing
buffer are bounded by constants that only depend on the
size of the switch and the number of flows. The
resequencing-and-output buffer after the second stage
not only performs resequencing to keep packets in
sequence, but also stores packets waiting for
transmission from the output links.\par
In this paper, we consider a traffic model with
multicasting flows. This is a more general model than
the point-to-point traffic model in [5]. A multicasting
flow is a stream of packets that has one common input and
a set of common outputs. For the multicasting flows,
fanout splitting (see e.g., [4]) is performed at the
central buffers (the VOQ in front of the second stage).
The central buffers are assumed to be infinite so that
no packets are lost in the switch. We consider two
types of scheduling policies in the central buffers:
the FCFS policy and the Earliest Deadline First (EDF)
policy. For the FCFS policy, a jitter control
mechanism is added in the VOQ in front of the second
stage. Such a jitter control mechanism delays every
packet to its maximum delay at the first stage so that
the flows entering the second stage are simply
time-shifted flows of the original ones. Our main
result for the FCFS scheme with jitter controls is the
following theorem. The proof of Theorem 1 is shown in
the full report [3].\par
Theorem 1: Suppose that all the buffers are empty at
time 0. Then the following hold for the FCFS scheme with
jitter control.\par
(i) The end-to-end delay for a packet through our
switch with multi-stage buffering is bounded above by
the sum of the delay through the corresponding FCFS
output-buffered switch and $ N L_{\rm max} + (N + 1)
M_{\rm max}$, where $ L_{\rm max}$ (resp. $ M_{\rm
max}$) is the maximum number of flows at an input
(resp. output) port.\par
(ii) The load-balancing buffer at an input port of the
first stage is bounded above by $ N L_{\rm
max}$.\par
(iii) The delay through the load-balancing buffer at an
input port of the first stage is bounded above by $ N
L_{\rm max}$.\par
(iv) The resequencing-and-output buffer at an output
port of the second stage is bounded above by $ (N + 1)
M_{\rm max}$.\par
In the EDF scheme (see Figure 3), every packet is
assigned a deadline that is the departure time from the
corresponding FCFS output-buffered switch. Packets are
scheduled according to their deadlines in the central
buffers. For the EDF scheme, there is no need to
implement the jitter control mechanism in the FCFS
scheme. As such, average packet delay can be greatly
reduced. However, as there is no jitter control, one
might need a larger resequencing buffer than that in
the FCFS scheme with jitter control. Since the first
stage is the same as that in the FCFS scheme, both the
delay and the buffer size of the load-balancing buffer
are still bounded by $ N L_{\rm max}$. Moreover, we
show the following theorem for the EDF scheme. Its
proof is given in the full report [3].\par
Theorem 2: Suppose that all the buffers are empty at
time 0. Then the following hold for the EDF
scheme.\par
(i) The end-to-end delay for a packet through our
switch with multi-stage buffering is bounded above by
the sum of the delay through the corresponding FCFS
output-buffered switch and $ N (L_{\rm max} + M_{\rm
max})$.\par
(ii) The resequencing-and-output buffer at an output
port of the second stage is bounded above by $ N (L_{\rm
max} + M_{\rm max})$.\par
Computing the departure times from the corresponding
FCFS output-buffered switch needs global information of
all the inputs. A simple way is to use the packet
arrival times as deadlines. Then the EDF scheme based
on arrival times yields the same departure order except
those packets that arrive at the same time. Since there
are at most $ M_{\rm max}$ packets that can arrive at
the same time to an output port of the corresponding
output-buffered switch, the end-to-end delay for a
packet through the multi-stage switch using arrival
times as deadlines is bounded above by the sum of the
delay through the corresponding FCFS output-buffered
switch and $ N L_{\rm max} + (N + 1) M_{\rm max}$.
Also, the resequencing-and-output buffer at an output
port of the second stage in this case is bounded above by
$ N L_{\rm max} + (N + 1) M_{\rm max}$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kogan:2001:AEP,
author = "Yaakov Kogan",
title = "Asymptotic expansions for probability distributions in
large loss and closed queueing networks",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "25--27",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Using integral representation in complex space and the
saddle point method, asymptotic expansions for
probability distributions are derived for the
generalised Engset model and a closed queueing network
with multiple classes. The results can be applied to
bandwidth engineering and admission control in data
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2001:KLM,
author = "Yuliy Baryshnikov and E. G. {Coffman, Jr.} and Predrag
Jelenkovi{\'c}",
title = "{Kelly}'s {LAN} model revisited",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "28--29",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For a given $ k \geq 1 $, subintervals of a given
interval $ [0, X] $ arrive at random and are accepted
(allocated) so long as they overlap fewer than $k$
subintervals already accepted. Subintervals not
accepted are cleared, while accepted subintervals
remain allocated for random retention times before they
are released and made available to subsequent arrivals.
Thus, the system operates as a generalized many-server
queue under a loss protocol. We study a discretized
version of this model that appears in reference
theories for a number of applications; the one of most
interest here is linear communication networks, a model
originated by Kelly [2]. Other applications include
surface adsorption/desorption processes and reservation
systems [3, 1].\par
The interval $ [0, X]$, $X$ an integer, is subdivided
by the integers into slots of length $1$. An {\em
interval\/} is always composed of consecutive slots,
and a configuration $C$ of intervals is simply a finite
set of intervals in $ [0, X]$. A configuration $C$ is
{\em admissible\/} if every non-integer point in $ [0,
X]$ is covered by at most $k$ intervals in $C$. Denote
the set of admissible configurations on the interval $
[0, X]$ by $ C_X$. Assume that, for any integer point
{\em i}, intervals of length $l$ with left endpoint $i$
arrive at rate $ \lambda_l$; the arrivals of intervals
at different points and of different lengths are
independent. A newly arrived interval is included in
the configuration if the resulting configuration is
admissible; otherwise the interval is rejected. It is
convenient to assume that the arrival rates $
\lambda_l$ vanish for all but a finite number of
lengths $l$, say $ \lambda_l > 0$, $ 1 \leq l \leq L$,
and $ \lambda_l = 0$ otherwise.\par
The departure of intervals from configurations has a
similar description: the flow of `killing' signals for
intervals of length $l$ arrive at each integer $i$ at
rate $ \mu_l$. If at the time such a signal arrives,
there is at least one interval of length $l$ with its
left endpoint at $i$ in the configuration, then one of
them leaves.\par
Our primary interest is in steady-state estimates of
the vacant space, i.e., the total length of available
subintervals $ k X - \sum l_i$, where the $ l_i$ are
the lengths of the subintervals currently allocated. We
obtain explicit results for $ k = 1$ and for general
$k$ with all subinterval lengths equal to 2, the
classical {\em dimer\/} case of chemical applications.
Our analysis focuses on the asymptotic regime of large
retention times, and brings out an apparently new,
broadly useful technique for extracting asymptotic
behavior from generating functions in two
dimensions.\par
Our model, as proposed by Kelly [2], arises in a study
of one-dimensional communication networks (LAN's). In
this application, intervals correspond to the circuits
connecting communicating parties and $ [0, X]$
represents the bus. Kelly's main results apply to the
case $ k = 1$ and to the case of general $k$ with
interval lengths governed by a geometric law.\par
The focus here is on space utilization, so the results
here add to the earlier theory in three principal ways.
First, we give expected vacant space for $ k = 1$, with
special emphasis on small-$ \mu $ asymptotics. Behavior
in this regime is quite different from that seen in the
`jamming' limit (absorbing state) of the pure filling
model (all $ \mu $'s are identically 0). Second, the
important dimer case of chemical applications, where
all intervals have length 2, is covered. Finally, the
approach of the analysis itself appears to be new and
to hold promise for the analysis of similar Markov
chains. In very broad terms, expected vacant space is
expressed in terms of the geometric properties of a
certain plane curve defined by a bivariate generating
function.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2001:SOB,
author = "David Gamarnik",
title = "Stochastic online binpacking problem: exact conditions
for bounded expected queue lengths under the best fit
packing heuristic",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "30--31",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the following stochastic bin packing
process: the items of different sizes arrive at times
$ t = 0, 1, 2, \ldots{} $ and are packed into unit-size
bins using the `largest first' rule. The unpacked items
form queues. Coffman and Stolyar [3] introduced this
system and posed the following question: under which
conditions expected queue lengths are bounded (system
is stable)? We provide exact computable conditions for
stability of this system using Lyapunov function
technique. The result holds for a very general class of
distributions of the arrival processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:2001:SCS,
author = "S. Lam and Rocky K. C. Chang",
title = "Stability comparison in single-server-multiple-queue
systems",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "32--34",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study stability comparison among
queues in single-server-multiple-queue systems. We
establish trichotomy between two queues in terms of
stability. We introduce a concept of degree of
instability which reflects the stability level of an
individual queue. Through comparing the degrees of
instabilities of two queues, we give conditions under
which two queues are as stable as each other and, one
queue is more (less) stable than the other. We also
generalize previous results regarding stability
ranking or stability ordering, and accommodate them
into our general form.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Szlavik:2001:GGT,
author = "{\'A}rp{\'a}d Szl{\'a}vik",
title = "{GI/G/1} type processes: a non-inversive matrix
analytical solution",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "35--37",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new general solution method is derived for the
general {GI/G/1} type processes --- for the
steady-state distribution of infinite block-structured
Markov chains with repetitive structure. While matrix
inversion is needed in each iterational step of other
general (and of more special) matrix analytical
procedures, the method presented here uses matrix
addition and matrix multiplication only. In exchange,
the computational complexity and the memory requirement
is increasing in each iterational step of the proposed
method. This paper, however, lays priority on the
theoretical aspect of the general solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boots:2001:STP,
author = "Nam Kyoo Boots and Perwez Shahabuddin",
title = "Simulating tail probabilities in {GI/GI/1} queues and
insurance risk processes with subexponential
distributions (extended abstract)",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "38--39",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:2001:GPS,
author = "Sem Borst and Michel Mandjes and Miranda van Uitert",
title = "Generalized processor sharing with heterogeneous
traffic classes",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "40--42",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system with two heterogeneous traffic
classes, one having light-tailed characteristics, the
other one exhibiting heavy-tailed properties. The two
traffic classes are served in accordance with the
Generalized Processor Sharing (GPS) discipline.
GPS-based scheduling algorithms, such as Weighted Fair
Queueing (WFQ), have emerged as an important mechanism
for achieving service differentiation in
integrated-services networks. We determine the workload
asymptotics of the light-tailed class for the situation
where its GPS weight is larger than its traffic
intensity. The GPS mechanism ensures that the workload
is bounded above by that in an isolated system with the
light-tailed class served in isolation at a constant
rate equal to its GPS weight. We show that the workload
distribution is in fact asymptotically equivalent to
that in the isolated system, multiplied with a certain
pre-factor, which accounts for the interaction with the
heavy-tailed class. Specifically, the pre-factor
represents the probability that the heavy-tailed class
is backlogged long enough for the light-tailed class to
reach overflow. The results provide crucial qualitative
insight in the typical overflow scenario.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2001:MSL,
author = "Zhen Liu and Mark S. Squillante and Joel L. Wolf",
title = "On maximizing service-level-agreement profits",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "43--44",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an initial study of a methodology for
maximizing profits in a general class of e-commerce
environments under a cost model in which revenues are
generated when QoS guarantees are satisfied and
penalties are incurred otherwise. The QoS guarantees
are based on multiclass SLAs between service providers
and their clients, which include the tail distributions
of the per-class response times. Our approach consists
of formulating the resulting optimization problem as a
network flow model with a separable set of concave
objective function summands based on derived
queueing-theoretic formulas. This problem is then
solved in a very efficient manner via a fixed-point
iteration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2001:PAA,
author = "Yingdong Lu and Jing-Sheng Song and Weian Zheng",
title = "Performance analysis of assemble-to-order systems
through strong approximations",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "45--46",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2001:OSQ,
author = "Mark S. Squillante and Cathy H. Xia and Li Zhang",
title = "Optimal scheduling in queueing network models of
high-volume commercial {Web} sites",
journal = j-SIGMETRICS,
volume = "29",
number = "3",
pages = "47--48",
month = dec,
year = "2001",
CODEN = "????",
DOI = "https://doi.org/10.1145/507553.507573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:37:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The optimal control of performance measures in
high-volume commercial web sites requires a fundamental
understanding of the interactions between the diverse
set of Internet services that support customer needs
and the different importance levels of these services
to both the customer and the e-commerce merchant. We
present a study of the server control policy in a
multiclass queueing network that maximizes a particular
function of profit, or minimizes a particular function
of cost, across the different classes of Internet
services.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sevcik:2002:SPC,
author = "Kenneth C. Sevcik and Hai Wang",
title = "Solution properties and convergence of an approximate
mean value analysis algorithm",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "3--10",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the solution properties and convergence
results of an approximate Mean Value Analysis (MVA)
algorithm, the Queue Line (QL) algorithm, for solving
separable queueing networks. We formally prove that the
QL algorithm is always more accurate than, and yet has
the same computational complexity as the
Bard-Schweitzer Proportional Estimation algorithm, the
most popular approximate MVA algorithm for solving this
type of queueing network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:2002:CCA,
author = "Carey Williamson and Qian Wu",
title = "A case for context-aware {TCP\slash IP}",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "11--23",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the design and evaluation of
CATNIP, a Context-Aware Transport/Network Internet
Protocol for the Web. This integrated protocol uses
application-layer knowledge (i.e., Web document size)
to provide explicit context information to the TCP and
IP protocols. While this approach violates the
traditional layered Internet protocol architecture, it
enables informed decision-making, both at network
endpoints and at network routers, regarding flow
control, congestion control, and packet discard
decisions. We evaluate the performance of the
context-aware TCP/IP approach first using ns-2 network
simulation, and then using WAN emulation to test a
prototype implementation of CATNIP in the Linux kernel
of an Apache Web server. The advantages of the CATNIP
approach are particularly evident in a congested
Internet with 1--10\% packet loss. Simulation results
indicate a 10--20\% reduction in TCP packet loss using
simple endpoint control mechanisms, with no adverse
impact on Web page retrieval times. More importantly,
using CATNIP context information at IP routers can
reduce mean Web page retrieval times by 20--80\%, and
the standard deviation by 60--90\%. The CATNIP algorithm
can also interoperate with Random Early Detection (RED)
for active queue management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet protocols; network emulation; network
simulation; TCP/IP; web performance",
}
@Article{Menasce:2002:SAM,
author = "Daniel A. Menasc{\'e}",
title = "Simple analytic modeling of software contention",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "24--30",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512844",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Being able to model contention for software resources
(e.g., a critical section or database lock) is
paramount to building performance models that capture
all aspects of the delay encountered by a process as it
executes. Several methods have been offered for dealing
with software contention and with message blocking in
client-server systems. We present in this paper a
simple, straightforward, easy to understand and
implement, approach to modeling software contention
using queuing networks. The approach consists of a
two-level iterative process. Two queuing networks are
considered: one represents software resources and the
other hardware resources. Multiclass models are allowed
and both open and closed queuing networks can be used
at the software layer. Any solution technique---exact
or approximate---can be used at any of the levels. This
technique falls in the general nature of fixed-point
approximate models and is similar in nature to other
approaches. The main difference lies in its
simplicity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cheng:2002:PSB,
author = "William C. Cheng and Cheng-Fu Chou and Leana Golubchik
and Samir Khuller",
title = "A performance study of {Bistro}, a scalable upload
architecture",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "31--39",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hot spots are a major obstacle to achieving
scalability in the Internet. We have observed that the
existence of hot spots in {\em upload\/} applications
(whose examples include submission of income tax forms
and conference paper submission) is largely due to
approaching deadlines. The hot spot is exacerbated by
the long transfer times. To address this problem, we
proposed {\em Bistro}, a framework for building
scalable wide-area upload applications, where we employ
intermediaries, termed {\em bistros}, for improving the
efficiency and scalability of uploads. Consequently,
appropriate assignment of clients to {\em bistros\/}
has a significant effect on the performance of upload
applications and thus constitutes an important research
problem. Therefore, in this paper we focus on the
assignment of clients to {\em bistros\/} problem and
present a performance study which demonstrates the
potential performance gains of the {\em Bistro\/}
framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lawson:2002:MQB,
author = "Barry G. Lawson and Evgenia Smirni",
title = "Multiple-queue backfilling scheduling with priorities
and reservations for parallel systems",
journal = j-SIGMETRICS,
volume = "29",
number = "4",
pages = "40--47",
month = mar,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/512840.512846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:08 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe a new, non-FCFS policy to schedule
parallel jobs on systems that may be part of a
computational grid. Our algorithm continuously monitors
the system (i.e., intensity of incoming jobs and
variability of their resource demands) and continuously
adapts its scheduling parameters to sudden workload
fluctuations. The proposed policy is based on
backfilling which permits job rearrangement in the
waiting queue. By exploiting otherwise idle processors,
this rearrangement reduces fragmentation of system
resources, thereby providing higher system utilization.
We propose to maintain multiple job queues that
effectively separate jobs according to their projected
execution time. Our policy supports different job
priority classes as well as job reservations, making it
appropriate for scheduling jobs on parallel systems
that are part of a computational grid. Detailed
performance comparisons via simulation using traces
from the Parallel Workload Archive indicate that the
proposed policy consistently outperforms traditional
scheduling approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "backfilling schedulers; batch schedulers;
computational grids; parallel systems; performance
analysis",
}
@Article{Pasztor:2002:PBP,
author = "Attila P{\'a}sztor and Darryl Veitch",
title = "{PC} based precision timing without {GPS}",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "1--10",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511336",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A highly accurate monitoring solution for active
network measurement is provided without the need for
GPS, based on an alternative software clock for PC's
running Unix. With respect to clock {\em rate}, its
performance exceeds common GPS and NTP synchronized
software clock accuracy. It is based on the TSC
register counting CPU cycles and offers a resolution of
around 1 ns, a rate stability of 0.1 PPM equal to that of
the underlying hardware, and a processing overhead well
under 1$ \mu $ s per timestamp. It is scalable and can
be run in parallel with the usual clock. It is argued
that accurate rate, and not synchronised offset, is the
key requirement of a clock for network measurement. The
clock requires an accurate estimation of the CPU cycle
period. Two calibration methods which do not require a
reference clock at the calibration point are given. To
the TSC clock we add timestamping optimisations to
create two high accuracy monitors, one based on Linux
and the other on Real-Time Linux. The TSC-RT-Linux
monitor has offset fluctuations of the order of 1$ \mu
$ s. The clock is ideally suited for high precision
active measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GPS; network measurement; NTP; PC clocks; software
clock; synchronization; timing",
}
@Article{Coates:2002:MLN,
author = "Mark Coates and Rui Castro and Robert Nowak and Manik
Gadhiok and Ryan King and Yolanda Tsang",
title = "Maximum likelihood network topology identification
from edge-based unicast measurements",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "11--20",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511337",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network tomography is a process for inferring
`internal' link-level delay and loss performance
information based on end-to-end (edge) network
measurements. These methods require knowledge of the
network topology; therefore a first crucial step in the
tomography process is topology identification. This
paper considers the problem of discovering network
topology solely from host-based, unicast measurements,
without internal network cooperation. First, we
introduce a novel delay-based measurement scheme that
does not require clock synchronization, making it more
practical than other previous proposals. In contrast to
methods that rely on network cooperation, our
methodology has the potential to identify layer two
elements (provided they are logical topology branching
points and induce some measurable delay). Second, we
propose a maximum penalized likelihood criterion for
topology identification. This is a global optimality
criterion, in contrast to other recent proposals for
topology identification that employ suboptimal,
pair-merging strategies. We develop a novel Markov
Chain Monte Carlo (MCMC) procedure for rapid
determination of the most likely topologies. The
performance of our new probing scheme and
identification algorithm is explored through simulation
and Internet experiments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bu:2002:NTG,
author = "Tian Bu and Nick Duffield and Francesco {Lo Presti}
and Don Towsley",
title = "Network tomography on general topologies",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "21--30",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511338",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider the problem of inferring
link-level loss rates from end-to-end multicast
measurements taken from a collection of trees. We give
conditions under which loss rates are identifiable on a
specified set of links. Two algorithms are presented to
perform the link-level inferences for those links on
which losses can be identified. One, the {\em minimum
variance weighted average (MVWA) algorithm\/} treats
the trees separately and then averages the results. The
second, based on {\em expectation-maximization (EM)\/}
merges all of the measurements into one computation.
Simulations show that EM is slightly more accurate than
MVWA, most likely due to its more efficient use of the
measurements. We also describe extensions to the
inference of link-level delay, inference from
end-to-end unicast measurements, and inference when
some measurements are missing.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2002:LEL,
author = "Song Jiang and Xiaodong Zhang",
title = "{LIRS}: an efficient low inter-reference recency set
replacement policy to improve buffer cache
performance",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "31--42",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511340",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although LRU replacement policy has been commonly used
in the buffer cache management, it is well known for
its inability to cope with access patterns with weak
locality. Previous work, such as LRU-K and 2Q, attempts
to enhance LRU capacity by making use of additional
history information of previous block references other
than only the recency information used in LRU. These
algorithms greatly increase complexity and/or cannot
consistently provide performance improvement. Many
recently proposed policies, such as UBM and SEQ,
improve replacement performance by exploiting access
regularities in references. They only address LRU
problems on certain specific and well-defined cases
such as access patterns like sequences and loops.
Motivated by the limits of previous studies, we propose
an efficient buffer cache replacement policy, called
{\em Low Inter-reference Recency Set\/} (LIRS). LIRS
effectively addresses the limits of LRU by using
recency to evaluate Inter-Reference Recency (IRR) for
making a replacement decision. This is in contrast to
what LRU does: directly using recency to predict next
reference timing. At the same time, LIRS almost retains
the same simple assumption of LRU to predict future
access behavior of blocks. Our objectives are to
effectively address the limits of LRU for a general
purpose, to retain the low overhead merit of LRU, and
to outperform those replacement policies relying on the
access regularity detections. Conducting simulations
with a variety of traces and a wide range of cache
sizes, we show that LIRS significantly outperforms LRU,
and outperforms other existing replacement algorithms
in most cases. Furthermore, we show that the additional
cost for implementing LIRS is trivial in comparison
with LRU.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2002:MAD,
author = "Mark S. Squillante and Yanyong Zhang and Anand
Sivasubramaniam and Natarajan Gautam and Hubertus
Franke and Jose Moreira",
title = "Modeling and analysis of dynamic coscheduling in
parallel and distributed environments",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "43--54",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511341",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling in large-scale parallel systems has been
and continues to be an important and challenging
research problem. Several key factors, including the
increasing use of off-the-shelf clusters of
workstations to build such parallel systems, have
resulted in the emergence of a new class of scheduling
strategies, broadly referred to as dynamic
coscheduling. Unfortunately, the size of both the
design and performance spaces of these emerging
scheduling strategies is quite large, due in part to
the numerous dynamic interactions among the different
components of the parallel computing environment as
well as the wide range of applications and systems that
can comprise the parallel environment. This in turn
makes it difficult to fully explore the benefits and
limitations of the various proposed dynamic
coscheduling approaches for large-scale systems solely
with the use of simulation and/or experimentation. To
gain a better understanding of the fundamental
properties of different dynamic coscheduling methods,
we formulate a general mathematical model of this class
of scheduling strategies within a unified framework
that allows us to investigate a wide range of parallel
environments. We derive a matrix-analytic analysis
based on a stochastic decomposition and a fixed-point
iteration. A large number of numerical experiments are
performed in part to examine the accuracy of our
approach. These numerical results are in excellent
agreement with detailed simulation results. Our
mathematical model and analysis is then used to explore
several fundamental design and performance tradeoffs
associated with the class of dynamic coscheduling
policies across a broad spectrum of parallel computing
environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2002:AMS,
author = "Eitan Bachmat and Jiri Schindler",
title = "Analysis of methods for scheduling low priority disk
drive tasks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "55--65",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511342",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper analyzes various algorithms for scheduling
low priority disk drive tasks. The derived closed form
solution is applicable to a class of greedy algorithms
that include a variety of background disk scanning
applications. By paying close attention to many
characteristics of modern disk drives, the analytical
solutions achieve very high accuracy---the difference
between the predicted response times and the
measurements on two different disks is only 3\% for all
but one examined workload. This paper also proves a
theorem which shows that background tasks implemented
by greedy algorithms can be accomplished with very
little seek penalty. Using greedy algorithm gives a
10\% shorter response time for the foreground
application requests and up to a 20\% decrease in total
background task run time compared to results from
previously published techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snavely:2002:SJP,
author = "Allan Snavely and Dean M. Tullsen and Geoff Voelker",
title = "Symbiotic jobscheduling with priorities for a
simultaneous multithreading processor",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "66--76",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511343",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simultaneous Multithreading machines benefit from
jobscheduling software that monitors how well
coscheduled jobs share CPU resources, and coschedules
jobs that interact well to make more efficient use of
those resources. As a result, informed coscheduling can
yield significant performance gains over naive
schedulers. However, prior work on coscheduling focused
on equal-priority job mixes, which is an unrealistic
assumption for modern operating systems. This paper
demonstrates that a scheduler for an SMT machine can
both satisfy process priorities and symbiotically
schedule low and high priority threads to increase
system throughput. Naive priority schedulers dedicate
the machine to high priority jobs to meet priority
goals, and as a result decrease opportunities for
increased performance from multithreading and
coscheduling. More informed schedulers, however, can
dynamically monitor the progress and resource
utilization of jobs on the machine, and dynamically
adjust the degree of multithreading to improve
performance while still meeting priority goals. Using
detailed simulation of an SMT architecture, we
introduce and evaluate a series of five software and
hardware-assisted priority schedulers. Overall, our
results indicate that coscheduling priority jobs can
significantly increase system throughput by as much as
40\%, and that (1) the benefit depends upon the
relative priority of the coscheduled jobs, and (2) more
sophisticated schedulers are more effective when the
differences in priorities are greatest. We show that
our priority schedulers can decrease average turnaround
times for a random job mix by as much as 33\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "job scheduling; priorities; simultaneous
multithreading",
}
@Article{Harrison:2002:PTD,
author = "Peter G. Harrison and William J. Knottenbelt",
title = "Passage time distributions in large {Markov} chains",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "77--85",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511345",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Probability distributions of response times are
important in the design and analysis of transaction
processing systems and computer-communication systems.
We present a general technique for deriving such
distributions from high-level modelling formalisms
whose state spaces can be mapped onto finite Markov
chains. We use a load-balanced, distributed
implementation to find the Laplace transform of the
first passage time density and its derivatives at
arbitrary values of the transform parameter $s$.
Setting $ s = 0$ yields moments while the full passage
time distribution is obtained using a novel distributed
Laplace transform inverter based on the Laguerre
method. We validate our method against a variety of
simple densities, cycle time densities in certain
overtake-free (tree-like) queueing networks and a
simulated Petri net model. Our implementation is
thereby rigorously validated and has already been
applied to substantial Markov chains with over 1
million states. Corresponding theoretical results for
semi-Markov chains are also presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2002:EAS,
author = "Alma Riska and Evgenia Smirni",
title = "Exact aggregate solutions for {M/G/1}-type {Markov}
processes",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "86--96",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511346",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new methodology for the exact analysis
of M/G/1-type Markov processes. The methodology uses
basic, well-known results for Markov chains by
exploiting the structure of the repetitive portion of
the chain and recasting the overall problem into the
computation of the solution of a finite linear system.
The methodology allows for the calculation of the
aggregate probability of a finite set of classes of
states from the state space, appropriately defined.
Further, it allows for the computation of a set of
measures of interest such as the system queue length or
any of its higher moments. The proposed methodology is
exact. Detailed experiments illustrate that the
methodology is also numerically stable, and in many
cases can yield significantly less expensive solutions
when compared with other methods, as shown by detailed
time and space complexity analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aggregation; M/G/1-type processes; Markov chains;
matrix analytic method",
}
@Article{Jin:2002:SMD,
author = "Shudong Jin and Azer Bestavros",
title = "Scalability of multicast delivery for non-sequential
streaming access",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "97--107",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511347",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To serve asynchronous requests using multicast, two
categories of techniques---stream merging and periodic
broadcasting---have been proposed. For sequential
streaming access, where requests are uninterrupted from
the beginning to the end of an object, these techniques
are highly scalable: the required server bandwidth for
stream merging grows {\em logarithmically\/} as request
arrival rate, and the required server bandwidth for
periodic broadcasting varies {\em logarithmically\/} as
the inverse of start-up delay. A sequential access
model, however, is inappropriate to model partial
requests and client interactivity observed in various
streaming access workloads. This paper analytically and
experimentally studies the scalability of multicast
delivery under a non-sequential access model where
requests start at random points in the object. We show
that the required server bandwidth for any protocol
providing immediate service grows at least as the {\em
square root\/} of request arrival rate, and the
required server bandwidth for any protocol providing
delayed service grows {\em linearly\/} with the inverse
of start-up delay. We also investigate the impact of
limited client receiving bandwidth on scalability. We
optimize practical protocols which provide immediate
service to non-sequential requests. The protocols
utilize limited client receiving bandwidth, and they
are near-optimal in that the required server bandwidth
is very close to its lower bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mauer:2002:FST,
author = "Carl J. Mauer and Mark D. Hill and David A. Wood",
title = "Full-system timing-first simulation",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "108--116",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511349",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer system designers often evaluate future design
alternatives with detailed simulators that strive for
{\em functional fidelity\/} (to execute relevant
workloads) and {\em performance fidelity\/} (to rank
design alternatives). Trends toward multi-threaded
architectures, more complex micro-architectures, and
richer workloads, make authoring detailed simulators
increasingly difficult. To manage simulator complexity,
this paper advocates decoupled simulator organizations
that separate functional and performance concerns.
Furthermore, we define an approach, called {\em
timing-first simulation}, that uses an augmented timing
simulator to execute instructions important to
performance in conjunction with a functional simulator
to insure correctness. This design simplifies software
development, leverages existing simulators, and can
model micro-architecture timing in detail. We describe
the timing-first organization and our experiences
implementing TFsim, a full-system multiprocessor
performance simulator. TFsim models a pipelined,
out-of-order micro-architecture in detail, was
developed in less than one person-year, and performs
competitively with previously-published simulators.
TFsim's timing simulator implements dynamically common
instructions (99.99\% of them), while avoiding the vast
and exacting implementation efforts necessary to run
unmodified commercial operating systems and workloads.
Virtutech Simics, a full-system functional simulator,
checks and corrects the timing simulator's execution,
contributing 18--36\% to the overall run-time. TFsim's
mostly correct functional implementation introduces a
worst-case performance error of 4.8\% for our
commercial workloads. Some additional simulator
performance is gained by verifying functional
correctness less often, at the cost of some additional
performance error.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2002:PPR,
author = "Ruoming Jin and Gagan Agrawal",
title = "Performance prediction for random write reductions: a
case study in modeling shared memory programs",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "117--128",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511350",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we revisit the problem of performance
prediction on shared memory parallel machines,
motivated by the need for selecting parallelization
strategy for {\em random write reductions.\/} Such
reductions frequently arise in data mining algorithms.
In our previous work, we have developed a number of
techniques for parallelizing this class of reductions.
Our previous work has shown that each of the three
techniques, {\em full replication, optimized full
locking}, and {\em cache-sensitive}, can outperform
others depending upon problem, dataset, and machine
parameters. Therefore, an important question is, {\em
`Can we predict the performance of these techniques for
a given problem, dataset, and machine?'.\/} This paper
addresses this question by developing an analytical
performance model that captures a two-level cache,
coherence cache misses, TLB misses, locking overheads,
and contention for memory. Analytical model is combined
with results from micro-benchmarking to predict
performance on real machines. We have validated our
model on two different SMP machines. Our results show
that our model effectively captures the impact of
memory hierarchy (two-level cache and TLB) as well as
the factors that limit parallelism (contention for
locks, memory contention, and coherence cache misses).
The difference between predicted and measured
performance is within 20\% in almost all cases.
Moreover, the model is quite accurate in predicting the
relative performance of the three parallelization
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kandiraju:2002:CTB,
author = "Gokul B. Kandiraju and Anand Sivasubramaniam",
title = "Characterizing the $d$-{TLB} behavior of {SPEC
CPU2000} benchmarks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "129--139",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511351",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the numerous optimization and evaluation
studies that have been conducted with TLBs over the
years, there is still a deficiency in an in-depth
understanding of TLB characteristics from an
application angle. This paper presents a detailed
characterization study of the TLB behavior of the SPEC
CPU2000 benchmark suite. The contributions of this work
are in identifying important application
characteristics for TLB studies, quantifying the
SPEC2000 application behavior for these
characteristics, as well as making pronouncements and
suggestions for future research based on these results.
Around one-fourth of the SPEC2000 applications (ammp,
apsi, galgel, lucas, mcf, twolf and vpr) have
significant TLB miss rates. Both capacity and
associativity are influencing factors on miss rates,
though they do not necessarily go hand-in-hand.
Multi-level TLBs are definitely useful for these
applications in cutting down access times without
significant miss rate degradation. Superpaging to
combine TLB entries may not be rewarding for many of
these applications. Software management of TLBs in
terms of determining what entries to prefetch, what
entries to replace, and what entries to pin has a lot
of potential to cut down miss rates considerably.
Specifically, the potential benefits of prefetching TLB
entries is examined, and Distance Prefetching is shown
to give good prediction accuracy for these
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hertz:2002:EFG,
author = "Matthew Hertz and Stephen M. Blackburn and J. Eliot B.
Moss and Kathryn S. McKinley and Darko Stefanovi{\'c}",
title = "Error-free garbage collection traces: how to cheat and
not get caught",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "140--151",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Programmers are writing a large and rapidly growing
number of programs in object-oriented languages such as
Java that require garbage collection (GC). To explore
the design and evaluation of GC algorithms quickly,
researchers are using simulation based on traces of
object allocation and lifetime behavior. The {\em brute
force\/} method generates perfect traces using a
whole-heap GC at every potential GC point in the
program. Because this process is prohibitively
expensive, researchers often use {\em granulated\/}
traces by collecting only periodically, e.g., every 32K
bytes of allocation. We extend the state of the art for
simulating GC algorithms in two ways. First, we present
a systematic methodology and results on the effects of
trace granularity for a variety of copying GC
algorithms. We show that trace granularity often
distorts GC performance results compared with perfect
traces, and that some GC algorithms are more sensitive
to this effect than others. Second, we introduce and
measure the performance of a new precise algorithm for
generating GC traces which is over 800 times faster
than the brute force method. Our algorithm, called
Merlin, frequently timestamps objects and later uses
the timestamps of dead objects to reconstruct precisely
when they died. It performs only periodic garbage
collections and achieves high accuracy at low cost,
eliminating any reason to use granulated traces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cameron:2002:HDM,
author = "Craig W. Cameron and Steven H. Low and David X. Wei",
title = "High-density model for server allocation and
placement",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "152--159",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511354",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is well known that optimal server placement is
NP-hard. We present an approximate model for the case
when both clients and servers are dense, and propose a
simple server allocation and placement algorithm based
on high-rate vector quantization theory. The key idea
is to regard the location of a request as a random
variable with probability density that is proportional
to the demand at that location, and the problem of
server placement as source coding, i.e., to optimally
map a source value (request location) to a code-word
(server location) to minimize distortion (network
cost). This view has led to a joint server allocation
and placement algorithm that has a time-complexity that
is linear in the number of clients. Simulations are
presented to illustrate its performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content distribution; high density; server placement
and allocation",
}
@Article{Olshefski:2002:ICR,
author = "David P. Olshefski and Jason Nieh and Dakshi Agrawal",
title = "Inferring client response time at the {Web} server",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "160--171",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511355",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As businesses continue to grow their World Wide Web
presence, it is becoming increasingly vital for them to
have quantitative measures of the client perceived
response times of their web services. We present Certes
(CliEnt Response Time Estimated by the Server), an
online server-based mechanism for web servers to
measure client perceived response time, as if measured
at the client. Certes is based on a model of TCP that
quantifies the effect that connection drops have on
perceived client response time, by using three simple
server-side measurements: connection drop rate,
connection accept rate and connection completion rate.
The mechanism does not require modifications to http
servers or web pages, does not rely on probing or third
party sampling, and does not require client-side
modifications or scripting. Certes can be used to
measure response times for any web content, not just
HTML. We have implemented Certes and compared its
response time measurements with those obtained with
detailed client instrumentation. Our results
demonstrate that Certes provides accurate server-based
measurements of client response times in HTTP 1.0/1.1
[14] environments, even with rapidly changing
workloads. Certes runs online in constant time with
very low overhead. It can be used at web sites and
server farms to verify compliance with service level
objectives.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "client perceived response time; web server",
}
@Article{Lee:2002:ACD,
author = "Sam C. M. Lee and John C. S. Lui and David K. Y. Yau",
title = "Admission control and dynamic adaptation for a
proportional-delay diffserv-enabled {Web} server",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "172--182",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511356",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a web server that can provide
differentiated services to clients with different QoS
requirements. The web server can provide $ N > 1 $
classes of service. Rather than using a strict priority
policy, which may lead to request starvation, the web
server provides a proportional-delay differentiated
service (PDDS) to heterogeneous clients. An operator
for the web server can specify `fixed' performance
spacings between classes, namely, $ r_{i, i + 1} > 1 $,
for $ i = 1, \ldots {}, N - 1 $. Requests in class $ i
+ 1 $ are guaranteed to have an average waiting time
which is $ 1 / r_{i, i + 1} $ of the average waiting
time of class $i$ requests. With PDDS, we can provide
consistent performance spacings over a wide range of
system loadings. In addition, each client can specify a
maximum average waiting time requirement to be
guaranteed by the web server. We propose two efficient
admission control algorithms so that a web server can
provide the QoS guarantees and, at the same time,
classify each client to its `lowest' admissible class,
resulting in lowest usage cost for the client. We also
consider how to perform end-point dynamic adaptation
such that clients can submit requests at lower class
and further reduce their usage cost, without violating
their QoS requirements. We propose two dynamic
adaptation algorithms: one is server-based and the
other is client-based. The client-based adaptation is
based on a non-cooperative game technique. We report
diverse experimental results to illustrate the
effectiveness of these algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2002:QSE,
author = "Haonan Tan and Derek L. Eager and Mary K. Vernon and
Hongfei Guo",
title = "Quality of service evaluations of multicast streaming
protocols",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "183--194",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511358",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently proposed scalable on-demand streaming
protocols have previously been evaluated using a system
cost measure termed the `required server bandwidth'.
For the scalable protocols that provide immediate
service to each client when the server is not
overloaded, this paper develops simple analytic models
to evaluate two client-oriented quality of service
metrics, namely (1) the mean client waiting time in
systems where clients are willing to wait if a
(well-provisioned) server is temporarily overloaded,
and (2) the fraction of clients who balk (i.e., leave
without receiving their requested media content) in
systems where the clients will tolerate no or only very
low service delays during a temporary overload. The
models include novel approximate MVA techniques that
appear to extend the range of applicability of
customized AMVA to include questions focussed on state
probabilities rather than on mean values, and to
systems in which the operating points of interest do
not include substantial client queues. For example, the
new AMVA models accurately estimate the server
bandwidth needed to achieve a balking rate as low as
one in ten thousand. The analytic models can easily be
applied to determine the server bandwidth needed for a
given number of media files, anticipated total client
request rate and file access frequencies, and target
balking rate or mean wait. Results show that (a)
scalable media servers that are configured with the
`required server bandwidth' defined in previous work
have low mean wait but may have unacceptably high
client balking rates (i.e., greater than one in
twenty), (b) for high to moderate client load, only a
10--50\% increase in the previously defined required
server bandwidth is needed to achieve a very low
balking rate (e.g., one in ten thousand), and (c) media
server performance (either mean wait or balking rate)
degrades rapidly if the actual client load is more than
10\% greater than the anticipated load.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balachandran:2002:CUB,
author = "Anand Balachandran and Geoffrey M. Voelker and
Paramvir Bahl and P. Venkat Rangan",
title = "Characterizing user behavior and network performance
in a public wireless {LAN}",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "195--205",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511359",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents and analyzes user behavior and
network performance in a public-area wireless network
using a workload captured at a well-attended ACM
conference. The goals of our study are: (1) to extend
our understanding of wireless user behavior and
wireless network performance; (2) to characterize
wireless users in terms of a parameterized model for
use with analytic and simulation studies involving
wireless LAN traffic; and (3) to apply our workload
analysis results to issues in wireless network
deployment, such as capacity planning, and potential
network optimizations, such as algorithms for load
balancing across multiple access points (APs) in a
wireless network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2002:ECT,
author = "Harkirat Singh and Suresh Singh",
title = "Energy consumption of {TCP Reno}, {Newreno}, and
{SACK} in multi-hop wireless networks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "206--216",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511360",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we compare the energy consumption
behavior of three versions of TCP --- Reno, Newreno,
and SACK. The experiments were performed on a wireless
testbed where we measured the energy consumed at the
sender node. Our results indicate that, in most cases,
using total energy consumed as the metric, SACK
outperforms Newreno and Reno while Newreno performs
better than Reno. The experiments emulated a large set
of network conditions including variable round trip
times, random loss, bursty loss, and packet reordering.
We also estimated the idealized energy for each of the
three implementations (i.e., we subtract out the energy
consumed when the sender is idle) and here,
surprisingly, we find that in many instances SACK
performs poorly compared to the other two
implementations. We conclude that if the mobile device
has a very low idle power consumption then SACK is not
the best implementation to use for bursty or random
loss. On the other hand, if the idle power consumption
is significant, then SACK is the best choice since it
has the lowest overall energy consumption.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy; mobile; TCP; wireless",
}
@Article{Heath:2002:ICA,
author = "Taliver Heath and Richard P. Martin and Thu D.
Nguyen",
title = "Improving cluster availability using workstation
validation",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "217--227",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511362",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We demonstrate a framework for improving the
availability of cluster based Internet services. Our
approach models Internet services as a collection of
interconnected components, each possessing well defined
interfaces and failure semantics. Such a decomposition
allows designers to engineer high availability based on
an understanding of the interconnections and isolated
fault behavior of each component, as opposed to ad-hoc
methods. In this work, we focus on using the entire
commodity workstation as a component because it
possesses natural, fault-isolated interfaces. We define
a failure event as a reboot because not only is a
workstation unavailable during a reboot, but also
because reboots are symptomatic of a larger class of
failures, such as configuration and operator errors.
Our observations of 3 distinct clusters show that the
time between reboots is best modeled by a Weibull
distribution with shape parameters of less than 1,
implying that a workstation becomes more reliable the
longer it has been operating. Leveraging this observed
property, we design an allocation strategy which
withholds recently rebooted workstations from active
service, validating their stability before allowing
them to return to service. We show via simulation that
this policy leads to a 70-30 rule-of-thumb: For a
constant utilization, approximately 70\% of the
workstation failures can be masked from end clients
with 30\% extra capacity added to the cluster, provided
reboots are not strongly correlated. We also found our
technique is most sensitive to the burstiness of
reboots as opposed to absolute lengths of workstation
uptimes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lai:2002:LWA,
author = "Albert Lai and Jason Nieh",
title = "Limits of wide-area thin-client computing",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "228--239",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511363",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While many application service providers have proposed
using thin-client computing to deliver computational
services over the Internet, little work has been done
to evaluate the effectiveness of thin-client computing
in a wide-area network. To assess the potential of
thin-client computing in the context of future
commodity high-bandwidth Internet access, we have used
a novel, non-invasive slow-motion benchmarking
technique to evaluate the performance of several
popular thin-client computing platforms in delivering
computational services cross-country over Internet2.
Our results show that using thin-client computing in a
wide-area network environment can deliver acceptable
performance over Internet2, even when client and server
are located thousands of miles apart on opposite ends
of the country. However, performance varies widely
among thin-client platforms and not all platforms are
suitable for this environment. While many thin-client
systems are touted as being bandwidth efficient, we
show that network latency is often the key factor in
limiting wide-area thin-client performance.
Furthermore, we show that the same techniques used to
improve bandwidth efficiency often result in worse
overall performance in wide-area networks. We
characterize and analyze the different design choices
in the various thin-client platforms and explain which
of these choices should be selected for supporting
wide-area computing services.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vetter:2002:DSP,
author = "Jeffrey Vetter",
title = "Dynamic statistical profiling of communication
activity in distributed applications",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "240--250",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511364",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance analysis of communication activity for a
terascale application with traditional message tracing
can be overwhelming in terms of overhead, perturbation,
and storage. We propose a novel alternative that
enables dynamic statistical profiling of an
application's communication activity using message
sampling. We have implemented an operational prototype,
named PHOTON, and our evidence shows that this new
approach can provide an accurate, low-overhead,
tractable alternative for performance analysis of
communication activity. PHOTON consists of two
components: a Message Passing Interface (MPI) profiling
layer that implements sampling and analysis, and a
modified MPI runtime that appends a small but necessary
amount of information to individual messages. More
importantly, this alternative enables an assortment of
runtime analysis techniques so that, in contrast to
post-mortem, trace-based techniques, the raw
performance data can be jettisoned immediately after
analysis. Our investigation shows that message sampling
can reduce overhead to imperceptible levels for many
applications. Experiments on several applications
demonstrate the viability of this approach. For
example, with one application, our technique reduced
the analysis overhead from 154\% for traditional
tracing to 6\% for statistical profiling. We also
evaluate different sampling techniques in this
framework. The coverage of the sample space provided by
purely random sampling is superior to counter- and
timer-based sampling. Also, PHOTON's design reveals
that frugal modifications to the MPI runtime system
could facilitate such techniques on production
computing systems, and it suggests that this sampling
technique could execute continuously for long-running
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cook:2002:TRP,
author = "Jeanine Cook and Richard L. Oliver and Eric E.
Johnson",
title = "Toward reducing processor simulation time via dynamic
reduction of microarchitecture complexity",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "252--253",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511366",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As processor microarchitectures continue to increase
in complexity, so does the time required to explore the
design space. Performing cycle-accurate, detailed
timing simulation of a realistic workload on a proposed
processor microarchitecture often incurs a
prohibitively large time cost. We propose a method to
reduce the time cost of simulation by dynamically
varying the complexity of the processor model
throughout the simulation. In this paper, we give first
evidence of the feasibility of this approach. We
demonstrate that there are significant amounts of time
during a simulation where a reduced processor model
accurately tracks important behavior of a full model,
and that by simulating the reduced model during these
times the total simulation time can be reduced.
Finally, we discuss metrics for detecting areas where
the two processor models track each other, which is
crucial for dynamically deciding when to use a reduced
rather than a full model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shih:2002:ETC,
author = "Jimmy S. Shih and Randy H. Katz",
title = "Evaluating tradeoffs of congestion pricing for voice
calls",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "254--255",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511367",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We conducted user experiments and simulations to
understand the tradeoffs of congestion pricing between
system performance and user satisfaction for a large
community of users. We found that congestion pricing
can be effective for voice calls because it only needs
to be applied occasionally and that users are
responsive to occasional price increases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sivan-Zimet:2002:WBO,
author = "Miriam Sivan-Zimet and Tara M. Madhyastha",
title = "Workload based optimization of probe-based storage",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "256--257",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511368",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance gap between microprocessors and
secondary storage is still a limitation in today's
systems. Academia and industry are developing new
technologies to overcome this gap, such as improved
read-write head technology and higher storage
densities. One promising new technology is probe-based
storage [1]. Characteristics of probe-based storage
include small size, high density, high parallelism, low
power consumption, and rectilinear motion. We have
created a probe-based storage simulation model,
configurable to different design points, and identify
its sensitivity to various parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lv:2002:SRU,
author = "Qin Lv and Pei Cao and Edith Cohen and Kai Li and
Scott Shenker",
title = "Search and replication in unstructured peer-to-peer
networks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "258--259",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511369",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Decentralized and unstructured peer-to-peer networks
such as Gnutella are attractive for certain
applications because they require no centralized
directories and no precise control over network
topology or data placement. However, the flooding-based
query algorithm used in Gnutella does not scale; each
individual query generates a large amount of traffic
and large systems quickly become overwhelmed by the
query-induced load. This paper explores various
alternatives to Gnutella's query algorithm and data
replication strategy. We propose a query algorithm
based on multiple random walks that resolves queries
almost as quickly as Gnutella's flooding method while
reducing the network traffic by two orders of magnitude
in many cases. We also present a distributed
replication strategy that yields close-to-optimal
performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chandramouli:2002:ALT,
author = "Y. Chandramouli and Arnold Neidhardt",
title = "Application level traffic measurements for capacity
engineering",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "260--261",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511370",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In general, the traffic characteristics of the
individual applications that constitute the aggregate
traffic on a network can be important for capacity
engineering. In this paper, we demonstrate based on
mathematical analysis the value of application specific
measurements even when there is no service
differentiation. In other words, under certain
assumptions, we obtain the result that errors in
engineering can occur, and in particular,
under-engineering can occur when traffic
characteristics of individual applications are ignored.
The assumptions are that the individual applications
can be modeled adequately as Fractional Brownian
Motions and that measurements are available only at
relatively coarse time scales. The results in this
paper emphasize the value of collecting fine-grained
traffic measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Williamson:2002:CAT,
author = "Carey Williamson and Qian Wu",
title = "Context-aware {TCP\slash IP}",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "262--263",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511371",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the design and evaluation of
CATNIP, a Context-Aware Transport/Network Internet
Protocol for the Web. This integrated protocol uses
application-layer knowledge (i.e., Web document size)
to provide explicit context information to the TCP and
IP protocols. While this approach violates the
traditional layered Internet protocol architecture, it
enables informed decision-making, both at network
endpoints and at network routers, regarding flow
control, congestion control, and packet discard
decisions. The ns-2 network simulator is used to
evaluate the performance of the context-aware TCP/IP
approach, using a simple network topology and a
synthetic Web workload. Simulation results indicate a
10-20\% reduction in TCP packet loss using simple
endpoint control mechanisms. More importantly, using
CATNIP context information at IP routers can produce
20-80\% reductions in the mean Web page retrieval
times, and 60-90\% reductions in the standard deviation
of retrieval times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Barakat:2002:IBT,
author = "Chadi Barakat and Patrick Thiran and Gianluca
Iannaccone and Christophe Diot",
title = "On {Internet} backbone traffic modeling",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "264--265",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511372",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The motivation of this work is to design a traffic
model that can be used in routers or by network
administrators to assist in network design and
management. Currently, network operators have very
basic information about the traffic. They mostly use
SNMP, which provides average throughput information
over 5 minutes intervals. An analytical model can
provide more accurate information on the traffic such
as its variation and its auto-correlation at short
timescales. In contrast to other works (see [2] and the
references therein), we choose to model the traffic on
a link that is {\em not\/} congested (congestion
possibly appears elsewhere in the Internet). This
assumption is valid (and in fact is the rule) for
backbone links that are generally over-provisioned
(i.e., the network is designed so that a backbone link
does not reach 50\% utilization in the absence of link
failure [4]). This choice is driven by our main
objective, which is to provide a link dimensioning tool
usable in backbone network management. We opt for a
model of the traffic at the flow level. Modeling the
traffic at the packet level is very difficult, since
traffic on a link is the result of a high level of
multiplexing of numerous flows whose behavior is
strongly influenced by the transport protocol and by
the application. A flow in our model is a very generic
notion. It can be a TCP connection or a UDP stream
(described by source and destination IP addresses,
source and destination port numbers and the protocol
number), or it can be a destination address prefix
(e.g., destination IP address in the form a.b.0.0/16).
The definition of a flow is deliberately kept general,
which allows our model to be applied to different
applications and to different transport mechanisms. The
model can however be specified to some particular
traffic types such as FTP and HTTP. By specifying the
model to a certain traffic type, one must expect to
obtain better results. Data flows arrive to a backbone
link at random times, transport a random volume of
data, and stay active for random periods. Given
information on flows, our model aims to compute the
total (aggregate) rate of data observed on the backbone
link. We are interested in capturing the dynamics of
the total data rate at short timescales (i.e., of the
order of hundreds of milliseconds). This dynamics can
be completely characterized using simple mathematical
tools, namely the shot-noise process [3]. Our main
contribution is the computation of simple expressions
for important measures of backbone traffic such as its
average, its variance, and its auto-correlation
function. These expressions are functions of a few
number of parameters that can be easily computed by a
router (e.g., using a tool such as NetFlow, which
provides flow information in Cisco routers). Our model
can be helpful for managing and dimensioning IP
backbone networks. Knowing the average and the variance
of the traffic allows an ISP to provision the links of
its backbone so as to avoid congestion. Congestion can
be avoided at short timescales of the order of hundreds
of milliseconds. The auto-correlation function of the
traffic can be used to propose predictors for its
future values. The prediction of the traffic has
diverse applications in managing the resources of the
backbone. One interesting application is the use of a
short-term prediction to optimize packet routing and
load balancing. Our model can also be used to assess
the impact on backbone traffic of changes made in the
rest of the Internet such as the addition of a new
customer, a new application, or a new transport
mechanism. The ISP can plan the provisioning of its
backbone so as to absorb the resulting change of
traffic before this change takes place.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:2002:SND,
author = "Alexander Thomasian and Chang Liu",
title = "Some new disk scheduling policies and their
performance",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "266--267",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advances in magnetic recording technology have
resulted in a rapid increase in disk capacities, but
improvements in the mechanical characteristics of disks
have been quite modest. For example, the access time to
random disk blocks has decreased by a mere factor of
two, while disk capacities have increased by several
orders of magnitude. OLTP applications subject disks to
a very demanding workload consisting of accesses to
randomly distributed disk blocks and gain limited
benefit from caching and prefetching (at the onboard
disk cache). We propose some new disk scheduling
methods to address the limited disk access bandwidth
problem. Some well-known disk scheduling methods are:
(i) FCFS. (ii) Shortest Seek Time First (SSTF). (iii)
SCAN and Cyclical SCAN (CSCAN). The latter moves the
disk arm to its beginning point after each SCAN so that
requests at all disk cylinders are treated
symmetrically. (iv) CSCAN with a lookahead of next {\em
i\/} requests (CSCAN-LAi) takes into account latency to
reorder their processing to minimize the sum of their
service times. (v) Shortest Access Time First (SATF),
which provides the best performance [2]. (vi) SATF with
lookahead for $i$ requests (SATF-LAi). In the case of
SATF-LAi with $i$ = 2 after the completion of request
$X$ the scheduler chooses requests $A$ and $B$ such
that the sum of their service times processed
consecutively, i.e., $ t_{X, A} + a t_{A, B}$, is
minimized. In {\em SATF with flexible lookahead\/} only
request $A$ is definitely processed and request $B$ is
processed provided that it is selected in the next
round. We refer to $a$ as the {\em discount factor\/}
($ 0 \leq a \leq 1$), because less weight is attached
to the service time of request $B$, since it may not be
processed after request $A$. The case $ a = 0$
corresponds to pure SATF. When $ a = 1$ we consider a
variant called {\em SATF with fixed lookahead\/} where
$B$ is processed unconditionally after $A$ before any
other (perhaps more favorable recent) requests. Thus
requests are processed two at a time, unless only one
request is available. More generally requests in the
temporal neighborhood of request $A$ are given higher
priority.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2002:SCC,
author = "Kang-Won Lee and Khalil Amiri and Sambit Sahu and
Chitra Venkatramani",
title = "On the sensitivity of cooperative caching performance
to workload and network characteristics",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "268--269",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511374",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A rich body of literature exists on several aspects of
cooperative caching [1, 2, 3, 4, 5], including object
placement and replacement algorithms [1], mechanisms
for reducing the overhead of cooperation [2, 3], and
the performance impact of cooperation [3, 4, 5].
However, while several studies have focused on
quantifying the performance benefit of cooperative
caching, their conclusions on the effectiveness of such
cooperation vary significantly. The source of this
apparent disagreement lies mainly in their different
assumptions about workload and network characteristics,
and about the degree of cooperation among caches. To
more comprehensively evaluate the practical benefit of
cooperative caching, we explore the sensitivity of the
benefit of cooperation to workload characteristics such
as {\em object popularity distribution, temporal
locality, one time referencing behavior}, and to
network characteristics such as {\em latencies between
clients, proxies, and servers.\/} Furthermore, we
identify a critical workload characteristic, which we
call {\em average access density}, and show that it has
a crucial impact on the effectiveness of cooperative
caching. In this extended abstract, we report on a few
important results selected from our extensive study
reported in [6]. In particular, assuming an LFU-based
cache management policy, we arrive at the following
conclusions. First, cooperative caching is only
effective when the {\em average access density\/}
(defined as the ratio of the number of requests to the
number of distinct objects in a time window) is
relatively high. Second, the effectiveness of
cooperative caching decreases as the skew in object
popularity increases. Higher skew means that only a
small number of objects are most frequently accessed,
reducing the benefit of larger caches, and therefore of
cooperation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anantharaman:2002:MAT,
author = "Vaidyanathan Anantharaman and Raghupathy Sivakumar",
title = "A microscopic analysis of {TCP} performance over
wireless ad-hoc networks",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "270--271",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ad-hoc networks are multi-hop wireless networks that
can operate without the services of an established
backbone infrastructure. While such networks have
obvious applications in the military and disaster
relief environments, more recent works that have
motivated their use even in regular wireless packet
data networks have increased their significance. The
focus of this paper is to study the performance of the
TCP transport layer protocol over ad-hoc networks.
Recent works in transport protocols for ad-hoc networks
have investigated the impact of ad-hoc network
characteristics on TCP's performance, and proposed
schemes that help TCP overcome the negative impact of
such characteristics as random wireless loss and
mobility. The primary mechanism proposed involves
sending an explicit link failure notification (ELFN) to
the source from the point of link failure. The source,
upon receiving the ELFN {\em freezes\/} TCP's timers
and state, re-computes a new route to the destination,
and either releases the timers and state or re-starts
them from their respective initial values. While the
goal of ELFN based approaches is to prevent the route
disruption time from adversely impacting TCP's
performance, in this paper we contend that there are
several other factors that influence TCP's performance
degradation. We briefly outline the different factors
below: $ \bullet $ {\em TCP Losses:\/} Every route
failure induces up to a TCP-window worth of packet
losses. While the losses have an absolute impact on the
performance degradation, the TCP source also reacts to
the losses by reducing the size of its window. Note
that ELFN will prevent this negative impact on TCP's
performance by appropriately freezing TCP's state. $
\bullet $ {\em MAC Failure Detection Time:\/} Since the
MAC layer (802.11) has to go through multiple
retransmissions before concluding link failure, there
is a distinct component associated with the time taken
to actually detect link failure since the occurrence of
the failure. Importantly, the detection time increases
with increasing load in the network. While an external
mechanism to detect link failures (e.g. through
periodic beacons at the routing layer) would solve this
problem, it comes at the cost of beacon overheads and
associated trade-offs. $ \bullet $ {\em MAC Packet
Arrival:\/} When a failure is detected as described
above, the link failure indication is sent only to the
source of the packet that triggered the detection. If
another source is using the same link in the path to
its destination, the node upstream of the link failure
will wait until it receives a packet from that source
before informing it of the link failure. This also
contributes to the magnitude of the delay after which a
source realizes that a path is broken. $ \bullet $ {\em
Route Computation Time:\/} Once a source is informed of
a path failure, the time taken to recompute the route
also increases with increasing load. With ELFN, for a
load of 25 connections, the per-flow average of the
aggregate time spent in route computation during a 100
second simulation was as high as 15 seconds. In
addition to the absolute impact of the idle periods,
TCP is also likely to experience timeouts, especially
in the heavily loaded scenarios where the route
computation time can be high. In the next section, we
present a framework of mechanisms called {\em Atra\/}
targeted toward addressing each of the above
components. We show through representative simulation
results that the proposed mechanisms outperform both
the default protocol stack and an ELFN-enabled protocol
stack substantially. We assume the default protocol
stack to comprise of the IEEE 802.11 MAC protocol, the
Dynamic Source Routing (DSR) routing protocol, and
TCP-NewReno as the transport layer protocol. For a more
detailed analysis of TCP performance in mobile ad-hoc
networks, and description of the Atra framework, please
see [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2002:ARS,
author = "Baek-Young Choi and Jaesung Park and Zhi-Li Zhang",
title = "Adaptive random sampling for load change detection",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "272--273",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511399.511376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Timely detection of changes in traffic load is
critical for initiating appropriate traffic engineering
mechanisms. Accurate measurement of traffic is
essential since the efficacy of change detection
depends on the accuracy of traffic estimation. However,
{\em precise\/} traffic measurement involves inspecting
{\em every\/} packet traversing a link, resulting in
significant overhead, particularly on high speed links.
{\em Sampling\/} techniques for traffic load {\em
estimation\/} are proposed as a way to limit the
measurement overhead. In this paper, we address the
problem of {\em bounding\/} sampling error within a
pre-specified tolerance level and propose an {\em
adaptive random sampling\/} technique that determines
the {\em minimum\/} sampling probability adaptively
according to traffic dynamics. Using real network
traffic traces, we show that the proposed adaptive
random sampling technique indeed produces the desired
accuracy, while also yielding significant reduction in
the amount of traffic samples. We also investigate the
impact of sampling errors on the performance of load
change detection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "change detection; sampling",
}
@Article{Zhao:2002:MEN,
author = "Zhili Zhao and Jayesh Ametha and Swaroop Darbha and A.
L. Narasimha Reddy",
title = "A method for estimating non-responsive traffic at a
router",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "274--275",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a scheme for estimating the
proportion of the incoming traffic that is not
responding to congestion at a router. The idea of the
proposed scheme is that if the observed queue length
and packet drop probability do not match with the
predicted results from the TCP model, then the error
must come from the non-responsive traffic; it can then
be used for estimating non-responsive traffic. The
proposed scheme utilizes queue length history, packet
drop history, expected TCP and queue dynamics to
estimate the proportion. We show that the proposed
scheme is effective over a wide range of traffic
scenarios through simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "control theory; estimation; non-responsive traffic;
traffic modeling",
}
@Article{Guo:2002:SFU,
author = "Liang Guo and Ibrahim Matta",
title = "Scheduling flows with unknown sizes: approximate
analysis",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "276--277",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511378",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous job scheduling studies indicate that
providing rapid response to interactive jobs which
place frequent but small demands, can reduce the
overall system average response time [1], especially
when the job size distribution is skewed (see [2] and
references therein). Since the distribution of Internet
flows is skewed, it is natural to design a network
system that favors short file transfers through service
differentiation. However, to maintain system
scalability, detailed per-flow state such as flow
length is generally not available inside the network.
As a result, we usually resort to a threshold-based
heuristic to identify and give preference to short
flows. Specifically, packets from a new flow are always
given the highest priority. However, the priority is
reduced once the flow has transferred a certain amount
of packets. In this paper, we use the MultiLevel (ML)
feedback queue [3] to characterize this discriminatory
system. However, the solution given in [3] is in the
form of an integral equation, and to date the equation
has been solved only for job size distribution that has
the form of mixed exponential functions. We adopt an
alternative approach, namely using a conservation law
by Kleinrock [1], to solve for the average response
time in such system. To that end, we approximate the
average response time of jobs by a linear function in
the job size and solve for the stretch (service
slowdown) factors. We show by simulation that such
approximation works well for job (flow) size
distributions that possess the heavy-tailed property
[2], although it does not work so well for exponential
distributions. Due to the limited space available, in
Section 2 we briefly describe the queueing model and
summarize our approximation approach to solving for the
average response time of the M/G/1/ML queueing system.
We conclude our paper in Section 3.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alouf:2002:FVC,
author = "Sara Alouf and Fabrice Huet and Philippe Nain",
title = "Forwarders vs. centralized server: an evaluation of
two approaches for locating mobile agents",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "278--279",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Internet has allowed the creation of huge amounts
of data located on many sites. Performing complex
operations on some data requires that the data be
transferred first to the machine on which the
operations are to be executed, which may require a
non-negligible amount of bandwidth and may seriously
limit performance if it is the bottleneck. However,
instead of moving the data to the code, it is possible
to move the code to the data, and perform all the
operations locally. This simple idea has led to a new
paradigm called {\em code-mobility:\/} a mobile object
--- sometimes called an agent --- is given a list of
destinations and a series of operations to perform on
each one of them. The agent will visit all of the
destinations, perform the requested operations and
possibly pass the result on to another object. Any
mobility mechanism must first provide a way to migrate
code from one host to another. It must also ensure that
any communication following a migration will not be
impaired by it, namely that two objects should still be
able to communicate even if one of them has migrated.
Such a mechanism is referred to as a {\em location\/}
mechanism since it often relies on the knowledge of the
location of the objects to ensure communications. Two
location mechanisms are widely used: the first one uses
a centralized server whereas the second one relies on
special objects called {\em forwarders.\/} This paper
evaluates and compares the performance of an existing
implementation of these approaches in terms of cost of
communication in presence of migration. Based on a
Markov chain analysis, we will construct and solve two
mathematical models, one for each mechanism and will
use them to evaluate the cost of location. For the
purpose of validation, we have developed for each
mechanism a benchmark that uses {\em ProActive\/} [2],
a Java library that provides all the necessary
primitives for code mobility. Experiments conducted on
a LAN and on a MAN have validated both models and have
shown that the location server always performs better
than the forwarders. Using our analytical models we
will nevertheless identify situations where the
opposite conclusion holds. However, under most
operational conditions location servers will perform
better than forwarders.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2002:TCR,
author = "Hyunseok Chang and Ramesh Govindan and Sugih Jamin and
Scott J. Shenker and Walter Willinger",
title = "Towards capturing representative {AS}-level {Internet}
topologies",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "280--281",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For the past two years, there has been a significant
increase in research activities related to studying and
modeling the Internet's topology, especially at the
level of {\em autonomous systems\/} (ASs). A closer
look at the measurements that form the basis for all
these studies reveals that the data sets used consist
of the BGP routing tables collected by the Oregon route
server (henceforth, the {\em Oregon route-views\/})
[1]. So far, there has been anecdotal evidence and an
intuitive understanding among researchers in the field
that BGP-derived AS connectivity is not complete.
However, as far as we know, there has been no
systematic study on {\em quantifying\/} the
completeness of currently known AS-level Internet
topologies. Our main objective in this paper is to
quantify the completeness of Internet AS maps
constructed from the Oregon route-views and to attempt
to capture {\em more representative\/} AS-level
Internet topology. One of the main contributions of
this paper is in developing a methodology that enables
quantitative investigations into issues related to the
(in)completeness of BGP-derived AS maps.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brownlee:2002:ISS,
author = "Nevil Brownlee and kc claffy",
title = "{Internet} stream size distributions",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "282--283",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present and discuss stream size and lifetime
distributions for web and non-web TCP traffic on a
campus OC12 link at UC San Diego. The distributions are
stable over long periods, and show that on this link
only 3\% of the streams last longer than one minute,
and that only about 0.5\% of them are bigger than 100
kBytes. Although there are large streams (elephants) on
this link, the bulk of its traffic is composed of many
small streams (mice).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2002:CLD,
author = "Yingwu Zhu and Yiming Hu",
title = "Can large disk built-in caches really improve system
performance?",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "284--285",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511382",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Via detailed file system and disk system simulation,
we examine the impact of disk built-in caches on the
system performance. Our results indicate that the
current trend of using large built-in caches is
unnecessary and a waste of money and power for most
users. Disk manufacturers could use much smaller
built-in caches to reduce the cost as well as
power-consumption, without affecting performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Simmonds:2002:WSB,
author = "Rob Simmonds and Carey Williamson and Russell Bradford
and Martin Arlitt and Brian Unger",
title = "{Web} server benchmarking using parallel {WAN}
emulation",
journal = j-SIGMETRICS,
volume = "30",
number = "1",
pages = "286--287",
month = jun,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/511334.511383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:38:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the use of a parallel
discrete-event network emulator called the Internet
Protocol Traffic and Network Emulator (IP-TNE) for Web
server benchmarking. The experiments in this paper
demonstrate the feasibility of high-performance WAN
emulation using parallel discrete-event simulation
techniques on shared-memory multiprocessors. Our
experiments with the Apache Web server achieve 3400
HTTP transactions per second for simple Web workloads,
and 1000 HTTP transactions per second for realistic Web
workloads, for static document retrieval across
emulated WAN topologies of up to 4096 concurrent
Web/TCP clients. The results show that WAN
characteristics, including round-trip delays, link
speeds, packet losses, packet sizes, and bandwidth
asymmetry, all have significant impacts on Web server
performance. WAN emulation enables stress testing and
benchmarking of Web server performance in ways that may
not be possible in simple LAN test scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Almeida:2002:AWB,
author = "Virgilio Almeida and Martin Arlitt and Jerry Rolia",
title = "Analyzing a {Web}-based system's performance measures
at multiple time scales",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "3--9",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588162",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web and e-commerce workloads are known to vary
significantly from hour to hour, day to day, and week
to week. The causes of these fluctuations are changes
in the number of users visiting a site and the mix of
services they require. Since the workloads are known to
vary over time, one should not simply choose an
arbitrary time interval and consider it as a reference
for performance evaluation. We conclude that time
scales are of great importance for operational
analysis, particularly for systems with bursty loads.
Service level agreements must certainly take into
account measurement time scales. Similarly input
parameters for predictive models are sensitive to time
scale. Ultimately, a time scale should be chosen for
service level requirements that best expresses the
needs of end-users and the price the owner of a site is
willing to pay for QoS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andreolini:2002:PSD,
author = "Mauro Andreolini and Michele Colajanni and Ruggero
Morselli",
title = "Performance study of dispatching algorithms in
multi-tier {Web} architectures",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "10--20",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The number and heterogeneity of requests to Web sites
are increasing also because the Web technology is
becoming the preferred interface for information
systems. Many systems hosting current Web sites are
complex architectures composed by multiple server
layers with strong scalability and reliability issues.
In this paper we compare the performance of several
combinations of centralized and distributed dispatching
algorithms working at the first and second layer, and
using different levels of state information. We confirm
some known results about load sharing in distributed
systems and give new insights to the problem of
dispatching requests in multi-tier cluster-based Web
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2002:SND,
author = "Yan Chen and Khian Hao Lim and Randy H. Katz and Chris
Overton",
title = "On the stability of network distance estimation",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "21--30",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Overlay network distance monitoring and estimation
system can benefit many new applications and services,
such as peer-to-peer overlay routing and location.
However, there is a lack of such scalable system with
small overhead, good usability, and good distance
estimation accuracy and stability. Thus we propose a
scalable overlay distance monitoring system, {\em
Internet Iso-bar}, which clusters hosts based on the
similarity of their perceived network distance, with no
assumption about the underlying network topology. The
centers of each cluster are then chosen as monitors to
represent their clusters for probing and distance
estimation. We compare it with other network distance
estimation systems, such as Global Network Positioning
(GNP) [1]. Internet Iso-bar is easy to implement and
use, and has good scalability and small communication
and computation cost for online monitoring. Preliminary
evaluation on real Internet measurement data also shows
that Internet Iso-bar has high prediction accuracy and
stability. Finally, by adjusting the number of
clusters, we can smoothly trade off the measurement and
management cost for better distance estimation accuracy
and stability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:2002:DSP,
author = "Alexander Thomasian and Chang Liu",
title = "Disk scheduling policies with lookahead",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "31--40",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advances in magnetic recording technology have
resulted in a rapid increase in disk capacities, but
improvements in the mechanical characteristics of disks
have been quite modest. For example the access time to
random disk blocks has decreased by a mere factor of
two, while disk capacities have increased by several
orders of magnitude. High performance OLTP applications
subject disks to a very demanding workload, since they
require high access rates to randomly distributed disk
blocks and gain limited benefit from caching and
prefetching. We address this problem by re-evaluating
the performance of some well known disk scheduling
methods, before proposing and evaluating extensions to
them. A variation to CSCAN takes into account
rotational latency, so that the service time of further
requests is reduced. A variation to SATF considers the
sum of service times of several successive requests in
scheduling the next request, so that the arm is moved
to a (temporal) neighborhood with many requests. The
service time of further requests is discounted, since
their immediate processing is not guaranteed. A
variation to the SATF policy prioritizes reads with
respect to writes and processes winner write requests
conditionally, i.e., when the ratio of their service
time to that of the winner read request is smaller than
a certain threshold. We review previous work to put our
work into the proper perspective and discuss plans for
future work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk scheduling; LOOK; SATF; SCAN; scheduling policies
with lookahead; simulation",
}
@Article{Brandwajn:2002:NSB,
author = "Alexandre Brandwajn",
title = "A note on {SCSI} bus waits",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "41--47",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the SCSI-2 standard, the unique IDs of devices on
the bus define a fixed priority whenever several
devices compete for the use of the bus. Although the
more recent SCSI-3 standard specifies an additional
fair arbitration mode, it leaves such fair mode an
optional feature. Despite a number of allusions to
potential unfairness of the traditional SCSI bus
arbitration scattered in the trade literature, there
seem to be few formal studies to quantify this
unfairness. In this paper, we propose a simple model of
SCSI bus acquisition in which devices on the bus are
viewed as sources of requests with fixed non-preemptive
priorities. We use the model to assess the expected
extent of unfairness, as measured by the mean bus wait,
under varying load conditions. Effects of tagged
command queueing are not considered in this note.
Numerical results obtained with our model show that
there is little unfairness as long as the workload is
balanced across devices and the bus utilization is
relatively low. Interestingly, even for medium bus
utilization a significant fraction of bus requests find
the bus free which might correlate with the service
rounds noted in a recent experimental study. For
unbalanced loads and higher bus utilization, the
expected wait for the bus experienced by lowest
priority devices can become significantly larger than
the one experienced by highest priority device. This
appears to be especially true if the higher priority
devices have higher I/O rates and occupy the bus for
longer periods. As might be expected, even for balanced
workloads, unfairness tends to increase with the number
of devices on the bus.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:2002:PSP,
author = "Daniel A. Menasc{\'e} and Lavanya Kanchanapalli",
title = "Probabilistic scalable {P2P} resource location
services",
journal = j-SIGMETRICS,
volume = "30",
number = "2",
pages = "48--58",
month = sep,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/588160.588167",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalable resource discovery services form the core of
directory and other middleware services. Scalability
requirements preclude centralized solutions. The need
to have directory services that are highly robust and
that can scale with the number of resources and the
performance of individual nodes, points to Peer-to-Peer
(P2P) architectures as a promising approach. The
resource location problem can be simply stated as
`given a resource name, find the location of a node or
nodes that manage the resource.' We call this the {\em
deterministic\/} location problem. In a very large
network, it is clearly not feasible to contact all
nodes to locate a resource. Therefore, we modify the
problem statement to `given a resource name, find with
a given probability, the location of a node or nodes
that manage the resource.' We call this a {\em
probabilistic\/} location approach. We present a
protocol that solves this problem and develop an
analytical model to compute the probability that a
directory entry is found, the fraction of peers
involved in a search, and the average number of hops
required to find a directory entry. Numerical results
clearly show that the proposed approach achieves high
probability of finding the entry while involving a
relatively small fraction of the total number of peers.
The analytical results are further validated by results
obtained from an implementation of the proposed
protocol in a cluster of workstations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2002:SIW,
author = "Mark S. Squillante",
title = "Special issue on the {Workshop on MAthematical
performance Modeling and Analysis (MAMA 2002)}",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "2--2",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2002:APP,
author = "Shengke Yu and Marianne Winslett and Jonghyun Lee and
Xiaosong Ma",
title = "Automatic and portable performance modeling for
parallel {I/O}: a machine-learning approach",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "3--5",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605524",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A performance model for a parallel I/O system is
essential for detailed performance analyses, automatic
performance optimization of I/O request handling, and
potential performance bottleneck identification. Yet
how to build a portable performance model for parallel
I/O system is an open problem. In this paper, we
present a machine-learning approach to automatic
performance modeling for parallel I/O systems. Our
approach is based on the use of a platform-independent
performance metamodel, which is a radial basis function
neural network. Given training data, the metamodel
generates a performance model automatically and
efficiently for a parallel I/O system on a given
platform. Experiments suggest that our goal of having
the generated model provide accurate performance
predictions is attainable, for the parallel I/O library
that served as our experimental testbed on an IBM SP.
This suggests that it is possible to model parallel I/O
system performance automatically and portably, and
perhaps to model a broader class of storage systems as
well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2002:EFL,
author = "Alma Riska and Vesselin Diev and Evgenia Smirni",
title = "Efficient fitting of long-tailed data sets into
phase-type distributions",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "6--8",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605525",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new technique for fitting long-tailed
data sets into phase-type (PH) distributions. This
technique fits data sets with non-monotone densities
into a mixture of Erlang and hyperexponential
distributions, and data sets with completely monotone
densities into hyperexponential distributions. The
method first partitions the data set in a divide and
conquer fashion and then uses the
Expectation-Maximization (EM) algorithm to fit the data
of each partition into a PH distribution. The fitting
results for each partition are combined to generate the
final fitting for the entire data set. The new method
is accurate, efficient, and allows one to apply
existing analytic tools to analyze the behavior of
queueing systems that operate under workloads that
exhibit long-tail behavior, such as queues in
Internet-related systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2002:USL,
author = "Mor Harchol-Balter and Karl Sigman and Adam Wierman",
title = "Understanding the slowdown of large jobs in an
{M/GI/1} system",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "9--11",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We explore the performance of an M/GI/1 queue under
various scheduling policies from the perspective of a
new metric: the {\em slowdown\/} experienced by largest
jobs. We consider scheduling policies that bias against
large jobs, towards large jobs, and those that are
fair, e.g., Processor-Sharing. We prove that as job
size increases to infinity, all work conserving
policies converge almost surely with respect to this
metric to no more than $1/(1-\rho)$, where $\rho$
denotes load. We also find that the expected slowdown
under any work conserving policy can be made
arbitrarily close to that under Processor-Sharing, for
all job sizes that are sufficiently large.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Golubchik:2002:MPS,
author = "Leana Golubchik and John C. S. Lui",
title = "Multi-path streaming: is it worth the trouble?",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "12--14",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605527",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2002:FSU,
author = "Y. Baryshnikov and E. Coffman and P. Jelenkovi{\'c}
and P. Mom{\v{c}}ilovi{\'c} and D. Rubenstein",
title = "Flood search under the {California} split strategy",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "15--16",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605528",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a version of the problem of searching
peer-to-peer networks by means of {\em floods}, or {\em
expanding rings\/}; when a network reduces to a path,
then the term flood becomes the more familiar search
term `scan,' which is the focus of this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2002:RTE,
author = "Vladimir Marbukh",
title = "Robust traffic engineering: game theoretic
perspective",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "17--19",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605529",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "On-line routing algorithms deal with requests as they
arrive without assuming any knowledge of the underlying
process that generates the streams of requests. By
contrast, off-line traffic engineering algorithms
assume complete statistical knowledge of the request
generating process. This dichotomy, however,
oversimplifies many practical situations when some
incomplete information on the expected demands is
available, and proper utilization of the available
information may improve the network performance. This
paper proposes a game theoretic framework for robust
traffic engineering intended to guard against the worst
case scenario with respect to possible uncertainties in
the external demands and link loads. The proposed
framework can be interpreted as a game of the routing
algorithm attempting to optimize the network
performance and the adversarial environment attempting
to obstruct these efforts by selecting the worst case
scenario with respect to the uncertainties. Two
different classes of schemes are considered: first,
suitable for {\em MPLS\/} implementation, centralized
schemes, and, second, suitable for {\em OSPF-OMP\/}
implementation, decentralized schemes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "equal cost multi-path; game theoretic framework; MPLS;
OSPF-OMP; robustness; stability; traffic engineering;
uncertain demand",
}
@Article{Benaboud:2002:ASC,
author = "H. Benaboud and A. Berqia and N. Mikou",
title = "An analytical study of {CANIT} algorithm in {TCP}
protocol",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "20--22",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605530",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "{\em CANIT\/} (Congestion Avoidance with Normalized
Interval of Time) algorithm is a new policy for TCP
congestion avoidance which is proposed in order to
improve TCP fairness over long delay links. {\em
                 CANIT\/} uses a new parameter referred to as NIT ({\em
Normalized Interval of Time\/}), which is the key of
this algorithm. In former works, we showed by
simulations of some configuration with various value of
NIT parameter, that using our algorithm instead of the
standard one, improves the TCP fairness as well as the
utilisation of network resources. In this work, we
propose an analytical study and we give the basic
equations in order to find the optimal value of NIT
parameter which provides more fairness and better
bandwidth utilisation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kamal:2002:MTR,
author = "Ahmed E. Kamal",
title = "Modeling {TCP Reno} with {RED}-based routers",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "23--25",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605531",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The purpose of this paper is to introduce an accurate
performance model for the TCP Reno version in the
presence of a bottlenecked router which uses the Random
                 Early Detection (RED) active queue management. The paper makes two
contributions: $ \bullet $ It introduces an accurate
model of a target source operating according to the TCP
Reno mechanism in the presence of background traffic.
The background traffic is represented by a general
discrete batch Markov arrival process (D-BMAP), which
is modified in order to make the phase transitions
dependent on packet losses. It can therefore be used to
model a collection of UDP and/or TCP sources. Under
this model, packets are dropped only when the router is
congested, or when the RED protocol is invoked, i.e.,
the buffer occupancy is taken into account. $ \bullet $
The paper also introduces an accurate model of the RED
mechanism, which tracks the evolution of the difference
between the instantaneous and average queue sizes. This
representation is chosen since the average queue size
tends to follow the instantaneous queue size, and
therefore the difference between them is usually
limited. This model is different from the models
presented in the literature for TCP in a number of
ways. Unlike [1, 2] where packet losses are random, and
independent of the actual buffer occupancy, our model
captures the buffer occupancy, and the actual packet
losses due to buffer overflow. This paper also models
the cross traffic using a general process. Reference
[3] considered the effect of cross traffic only by
modeling the service times using a random process.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Samios:2002:MTT,
author = "Charalampos (Babis) Samios and Mary K. Vernon",
title = "Modeling throughput in {TCP Vegas}",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "26--28",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605532",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This abstract describes a simple and accurate analytic
model for the steady state throughput of TCP Vegas, as
a function of round trip time and packet loss rate.
Such models have previously been developed for TCP
Reno. However, several aspects of TCP Vegas need to be
treated quite differently from their counterparts in
Reno. In particular, TCP Vegas employs an algorithm to
detect the incipient stages of congestion in the
network and preemptively adjusts the sending rate to
avoid losses. The proposed model reflects this
behavior, as well as Vegas' new slow start mechanism,
and the most important of the innovative congestion
recovery mechanisms introduced in TCP Vegas. Initial
validations against the ns-2 simulator configured to
simulate TCP Vegas are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chandramouli:2002:MAU,
author = "Y. Chandramouli and Arnold L. Neidhardt",
title = "Mathematical analysis of the use of application-level
traffic measurements for capacity engineering",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "29--31",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605533",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In general, the traffic characteristics of the
individual applications that constitute the aggregate
traffic on a network can be important for capacity
engineering. In this paper, we demonstrate, based on
mathematical analysis, the value of
application-specific measurements, even when there is
no service differentiation. Specifically, under certain
assumptions, we obtain the result that engineering
errors occur when traffic characteristics of individual
applications are ignored, and that the errors are in
the direction of under-engineering. The assumptions are
that the individual applications can be modeled
adequately as Fractional Brownian Motions and that
measurements are available only at relatively coarse
time scales (as is common presently). The results in
this paper emphasize the value of collecting
fine-grained traffic measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xia:2002:TMP,
author = "Cathy H. Xia and Zhen Liu and Mark S. Squillante and
Li Zhang and Naceur Malouch",
title = "Traffic modeling and performance analysis of
commercial {Web} sites",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "32--34",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605534",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haas:2002:VLR,
author = "Peter J. Haas and Peter W. Glynn",
title = "On the validity of long-run estimation methods for
discrete-event systems",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "35--37",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605535",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2002:CSP,
author = "David Gamarnik",
title = "Computing stationary probability distributions and
large deviation rates for constrained random walks: the
undecidability results",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "38--40",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605536",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our model is a constrained homogeneous random walk in
                 $ Z_+^d $. The convergence to stationarity for such a
random walk can often be checked by constructing a
Lyapunov function. The same Lyapunov function can also
be used for computing approximately the stationary
distribution of this random walk, using methods
developed in [11]. In this paper we show that computing
exactly the stationary probability for this type of
random walks is an undecidable problem: no algorithm
can exist to achieve this task. We then prove that
computing large deviation rates for this model is also
an undecidable problem. We extend these results to a
certain type of queueing systems. The implication of
these results is that no useful formulas for computing
stationary probabilities and large deviations rates can
exist in these systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harrison:2002:PFC,
author = "Peter G. Harrison",
title = "Product-forms from a {CAT} and {DOG}",
journal = j-SIGMETRICS,
volume = "30",
number = "3",
pages = "41--43",
month = dec,
year = "2002",
CODEN = "????",
DOI = "https://doi.org/10.1145/605521.605537",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:40:46 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The equilibrium state space probabilities of a
stationary Markov chain can be obtained immediately
from its reversed process. There are two main steps in
the derivation of product-form solutions for
multi-dimensional Markov chains using this approach.
First, the reversed process must be determined. This is
achieved for a wide class of cooperating processes
using a compound agent theorem (CAT), a compositional
result from Markovian Process Algebra (MPA). Secondly,
a path to each state must be found from some specified
reference state. This is usually obtained in a simple
way by considering the components of the state in order
of dimension, e.g. in a dimension-ordered graphical
(DOG) representation. In this note, the main results
for reversing a stationary compound Markov process,
under appropriate conditions, are given and applied to
deriving product-forms. No balance equations are
solved.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Allman:2003:EXR,
author = "Mark Allman",
title = "An evaluation of {XML-RPC}",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "2--11",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper explores the complexity and performance of
the XML-RPC system for remote method invocation. We
developed a program that can use either XML-RPC-based
network communication or a hand-rolled version of
networking code based on the java.net package. We first
compare our two implementations using traditional
object-oriented metrics. In addition, we conduct tests
over a local network and the Internet to assess the
performance of the two versions of the networking code
using traditional internetworking metrics. We find that
XML-RPC reduces the programming complexity of the
software by roughly 50\% (across various metrics). On
the other hand, the hand-rolled java.net-based
implementation offers up to an order of magnitude
better network performance in some of our tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weissman:2003:GES,
author = "Jon Weissman",
title = "Guest editorial: special issue on grid computing",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "12--12",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Taylor:2003:PIP,
author = "Valerie Taylor and Xingfu Wu and Rick Stevens",
title = "{Prophesy}: an infrastructure for performance analysis
and modeling of parallel and {Grid} applications",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "13--18",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance is an important issue with any
application, especially grid applications. Efficient
execution of applications requires insight into how the
system features impact the performance of the
applications. This insight generally results from
significant experimental analysis and possibly the
                 development of performance models. This paper presents
the Prophesy system, for which the novel component is
the model development. In particular, this paper
discusses the use of our {\em coupling parameter\/}
(i.e., a metric that attempts to quantify the
interaction between kernels that compose an
application) to develop application models. We discuss
how this modeling technique can be used in the analysis
of grid applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "grid applications; grid systems; parallel
applications; performance analysis; performance
modeling",
}
@Article{Lowekamp:2003:CAP,
author = "Bruce B. Lowekamp",
title = "Combining active and passive network measurements to
build scalable monitoring systems on the {Grid}",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "19--26",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Because the network provides the wires that connect a
grid, understanding the performance provided by a
network is crucial to achieving satisfactory
performance from many grid applications. Monitoring the
network to predict its performance for applications is
an effective solution, but the costs and scalability
challenges of actively injecting measurement traffic,
as well as the information access and accuracy
challenges of using passively collected measurements,
complicate the problem of developing a monitoring
solution for a global grid. This paper is a preliminary
report on the Wren project, which is focused on
developing scalable solutions for network performance
monitoring. By combining active and passive monitoring
techniques, Wren is able to reduce the need for
invasive measurements of the network without
sacrificing measurement accuracy on either the WAN or
LAN levels. Specifically, we present topology-based
steering, which dramatically reduces the number of
measurements taken for a system by using passively
acquired topology and utilization to select the
bottleneck links that require active bandwidth probing.
Furthermore, by using passive measurements while an
application is running and active measurements when
none is running, we preserve our ability to offer
accurate, timely predictions of network performance,
while eliminating additional invasive measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Snavely:2003:BGC,
author = "Allan Snavely and Greg Chun and Henri Casanova and Rob
F. {Van der Wijngaart} and Michael A. Frumkin",
title = "Benchmarks for {Grid} computing: a review of ongoing
efforts and future directions",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "27--32",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773062",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Grid architectures are collections of computational
and data storage resources linked by communication
channels for shared use. It is important to deploy
measurement methods so that Grid applications and
architectures can evolve guided by scientific
principles. Engineering pursuits need agreed upon
metrics---a common language for communicating results,
so that alternative implementations can be compared
quantitatively. Users of systems need performance
parameters that describe system capabilities so that
they can develop and tune their applications.
Architects need examples of how users will exercise
their system to improve the design. The Grid community
is building systems such as the TeraGrid [1] and The
Informational Power Grid [2] while applications that
can fully benefit from such systems are also being
developed. We conclude that the time to develop and
deploy sets of Grid benchmarks is now. This article
reviews fundamental principles, early efforts, and
benefits of Grid benchmarks to the study and design of
Grids.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmarks; grid computing",
}
@Article{Lu:2003:GGR,
author = "Dong Lu and Peter A. Dinda",
title = "{GridG}: generating realistic computational grids",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "33--40",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A realistic workload is essential in evaluating
middleware for computational grids. One important
component of that workload is the raw grid itself: an
annotated graph representing the network topology and
the hardware and software available on each node and
link within it. GridG is an extensible synthetic
generator of such graphs that is implemented as a
series of transformations on a common graph format. The
paper provides a definition of and requirements for
grid generation. We then describe the GridG process in
two steps: topology generation and annotation. For
topology generation, we have both a model and a
mechanism. We leverage Tiers, an existing tool commonly
used in the networking community, but we extend it to
produce graphs that conform to recently discovered
power laws of Internet topology. We also contribute to
the theory of network topology by pointing out a
contradiction between two laws, and proposing a new
version of one of them. For annotation, we have
developed a mechanism, the {\em requirements\/} for a
model, and identified the open problem of
characterizing the distribution and correlation of
hardware and software resources on the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wolski:2003:EPR,
author = "Rich Wolski",
title = "Experiences with predicting resource performance
on-line in computational grid settings",
journal = j-SIGMETRICS,
volume = "30",
number = "4",
pages = "41--49",
month = mar,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/773056.773064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:22 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe methods for predicting the
performance of Computational Grid resources (machines,
networks, storage systems, etc.) using computationally
inexpensive statistical techniques. The predictions
generated in this manner are intended to support
adaptive application scheduling in Grid settings, and
on-line fault detection. We describe a
mixture-of-experts approach to non-parametric,
univariate time-series forecasting, and detail the
effectiveness of the approach using example data
gathered from `production' (i.e. non-experimental)
Computational Grid installations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Girbal:2003:DSR,
author = "Sylvain Girbal and Gilles Mouchard and Albert Cohen
and Olivier Temam",
title = "{DiST}: a simple, reliable and scalable method to
significantly reduce processor architecture simulation
time",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "1--12",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While architecture simulation is often treated as a
methodology issue, it is at the core of most processor
architecture research works, and simulation speed is
often the bottleneck of the typical trial-and-error
research process. To speedup simulation during this
research process and get trends faster, researchers
usually reduce the trace size. More sophisticated
techniques like trace sampling or distributed
simulation are scarcely used because they are
considered unreliable and complex due to their impact
on accuracy and the associated warm-up issues. In this
article, we present DiST, a practical distributed
simulation scheme where, unlike in other simulation
techniques that trade accuracy for speed, the user is
relieved from most accuracy issues thanks to an
automatic and dynamic mechanism for adjusting the
warm-up interval size. Moreover, the mechanism is
designed so as to always privilege accuracy over
speedup. The speedup scales with the amount of
available computing resources, bringing an average 7.35
speedup on 10 machines with an average IPC error of
                 1.81\% and a maximum IPC error of 5.06\%. Besides
proposing a solution to the warm-up issues in
distributed simulation, we experimentally show that our
technique is significantly more accurate than trace
size reduction or trace sampling for identical
speedups. We also show that not only the error always
remains small for IPC and other metrics, but that a
researcher can reliably base research decisions on DiST
simulation results. Finally, we explain how the DiST
tool is designed to be easily pluggable into existing
architecture simulators with very few modifications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed simulation; processor architecture",
}
@Article{Aamodt:2003:FMO,
author = "Tor M. Aamodt and Pedro Marcuello and Paul Chow and
Antonio Gonz{\'a}lez and Per Hammarlund and Hong Wang
and John P. Shen",
title = "A framework for modeling and optimization of prescient
instruction prefetch",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "13--24",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781030",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a framework for modeling
macroscopic program behavior and applies it to
                 optimizing prescient instruction prefetch --- a novel
technique that uses helper threads to improve
single-threaded application performance by performing
judicious and timely instruction prefetch. A helper
thread is initiated when the main thread encounters a
spawn point, and prefetches instructions starting at a
distant target point. The target identifies a code
region tending to incur I-cache misses that the main
thread is likely to execute soon, even though
intervening control flow may be unpredictable. The
optimization of spawn-target pair selections is
formulated by modeling program behavior as a Markov
chain based on profile statistics. Execution paths are
considered stochastic outcomes, and aspects of program
behavior are summarized via path expression mappings.
Mappings for computing reaching, and posteriori
probability; path length mean, and variance; and
expected path footprint are presented. These are used
with Tarjan's fast path algorithm to efficiently
estimate the benefit of spawn-target pair selections.
Using this framework we propose a spawn-target pair
selection algorithm for prescient instruction prefetch.
This algorithm has been implemented, and evaluated for
the Itanium Processor Family architecture. A limit
                 study finds 4.8\% to 17\% speedups on an in-order
simultaneous multithreading processor with eight
contexts, over nextline and streaming I-prefetch for a
set of benchmarks with high I-cache miss rates. The
framework in this paper is potentially applicable to
other thread speculation techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; helper threads; instruction
prefetch; multithreading; optimization; path
expressions",
}
@Article{Xia:2003:QSL,
author = "Cathy H. Xia and Zhen Liu",
title = "Queueing systems with long-range dependent input
process and subexponential service times",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "25--36",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the asymptotic tail distribution of
stationary waiting times and stationary virtual waiting
times in a single-server queue with long-range
dependent arrival process and subexponential service
times. We investigate the joint impact of the long
range dependency of the arrival process and of the tail
distribution of the service times. We consider two
traffic models that have been widely used to
characterize the long-range dependence structure,
                 namely, the M/G/$\infty$ input model and the Fractional
Gaussian Noise (FGN) model. We focus on the response
times of the customers in a First-Come First-Serve
(FCFS) queueing system, although the results carry
through to the backlog distribution of the system with
any arbitrary queueing discipline. When the arrival
                 process is driven by an M/G/$\infty$ input model we show that
if the residual service time tail distribution $ F_e $
is lighter than the residual session duration $ G_e $,
then the stationary waiting time is dominated by the
long-range dependence structure, which is determined by
the residual session duration $ G_e $. If the residual
service time distribution $ F_e $ is heavier than the
residual session duration $ G_e $, then the tail
distribution of the stationary waiting time is
dominated by that of the residual service time. When
the arrival process is modeled by an FGN, we show that
the waiting time tail distribution is asymptotically
equal to the tail distribution of the residual service
time if the latter is asymptotically heavier than
Weibull distribution with shape parameter $ 2 - 2 H $,
where $H$ is the Hurst parameter of the FGN. If,
however, this residual service time is asymptotically
lighter than Weibull distribution with shape parameter
$ 2 - 2 H$, then the waiting time tail distribution is
dominated by the dependence structure of the arrival
process so that it is asymptotically equal to Weibull
distribution with shape parameter $ 2 - 2 H$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic queueing analysis; FGN; long-range
                 dependency; M/G/$\infty$; subexponential distributions",
}
@Article{Galmes:2003:ACM,
author = "Sebasti{\`a} Galm{\'e}s and Ramon Puigjaner",
title = "An algorithm for computing the mean response time of a
single server queue with generalized on\slash off
traffic arrivals",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "37--46",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, an exact solution for the response time
distribution of a single server, infinite capacity,
discrete-time queue is presented. This queue is fed by
a flexible discrete-time arrival process, which follows
an on/off evolution. A workload variable is associated
with each arrival instant, which may correspond to the
service demand generated by a single arrival, or
represent the number of simultaneous arrivals (bulk
arrivals). Accordingly, the analysis focuses on two
types of queues: (On/Off)/G/1 and (Batch-On/Off)/D/1.
For both cases, a decomposition approach is carried
out, which divides the problem into two contributions:
the response time experienced by single bursts in
isolation, and the increase on the response time caused
by the unfinished work that propagates from burst to
burst. Particularly, the solution for the unfinished
work is derived from a Wiener--Hopf factorization of
random walks, which was already used in the analysis of
discrete GI/G/1 queues. Compared to other related
works, the procedure proposed in this paper is exact,
valid for any traffic intensity and has no constraints
on the distributions of the input random variables
characterizing the process: duration of on and off
periods, and workload. From the general solution, an
efficient and robust iterative algorithm for computing
the expected response time of both queues is developed,
which can provide results at any desired precision.
This algorithm is numerically evaluated for different
types of input distributions and proved against
simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "arrival process; Markov chain; queuing model; response
time; steady-state",
}
@Article{Garetto:2003:MSM,
author = "Michele Garetto and Don Towsley",
title = "Modeling, simulation and measurements of queuing delay
under long-tail {Internet} traffic",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "47--57",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe an analytical approach for
estimating the queuing delay distribution on an
Internet link carrying realistic TCP traffic, such as
that produced by a large number of finite-size
connections transferring files whose sizes are taken
from a long-tail distribution. The analytical
predictions are validated against detailed simulation
experiments and real network measurements. Despite its
simplicity, our model proves to be accurate and robust
under a variety of operating conditions, and offers
novel insights into the impact on the network of
long-tail flow length distributions. Our contribution
is a performance evaluation methodology that could be
usefully employed in network dimensioning and
engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Markovian models; queueing analysis; TCP",
}
@Article{Bohacek:2003:HSM,
author = "Stephan Bohacek and Jo{\~a}o P. Hespanha and Junsoo
Lee and Katia Obraczka",
title = "A hybrid systems modeling framework for fast and
accurate simulation of data communication networks",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "58--69",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a general hybrid systems
modeling framework to describe the flow of traffic in
communication networks. To characterize network
behavior, these models use averaging to continuously
approximate discrete variables such as congestion
window and queue size. Because averaging occurs over
short time intervals, one still models discrete events
such as the occurrence of a drop and the consequent
reaction (e.g., congestion control). The proposed
hybrid systems modeling framework fills the gap between
packet-level and fluid-based models: by averaging
discrete variables over a very short time scale (on the
order of a round-trip time), our models are able to
capture the dynamics of transient phenomena fairly
accurately. This provides significant flexibility in
modeling various congestion control mechanisms,
different queuing policies, multicast transmission,
etc. We validate our hybrid modeling methodology by
comparing simulations of the hybrid models against
packet-level simulations. We find that the probability
density functions produced by ns-2 and our hybrid model
match very closely with an $ L^1$-distance of less than
1\%. We also present complexity analysis of ns-2 and
the hybrid model. These tests indicate that hybrid
models are considerably faster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; data communication networks;
hybrid systems; simulation; TCP; UDP",
}
@Article{Samios:2003:MTT,
author = "Charalampos (Babis) Samios and Mary K. Vernon",
title = "Modeling the throughput of {TCP Vegas}",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "71--81",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/781027.781037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Previous analytic models of TCP Vegas throughput have
been developed for loss-free (all-Vegas) networks. This
work develops a simple and accurate analytic model for
the throughput of a TCP Vegas bulk transfer in the
presence of packet loss, as a function of average round
trip time, minimum round trip time, and loss rate for
the transfer. Similar models have previously been
developed for TCP Reno. However, several aspects of TCP
Vegas need to be treated differently than their
counterparts in Reno. The proposed model captures the
key innovative mechanisms that Vegas employs during
slow start, congestion avoidance, and congestion
recovery. The results include (1) a simple, validated
model of TCP Vegas throughput that can be used for
equation-based rate control of other flows such as UDP
streams, (2) a simple formula to determine, from the
measured packet loss rate, whether the network buffers
are overcommitted and thus the TCP Vegas flow cannot
reach the specified target lower threshold on
throughput, (3) new insights into the design and
performance of TCP Vegas, and (4) comparisons between
TCP Vegas and TCP Reno including new insights regarding
incremental deployment of TCP Vegas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance model; TCP; TCP Vegas; throughput",
}
@Article{Wang:2003:MAU,
author = "Jiantao Wang and Ao Tang and Steven H. Low",
title = "Maximum and asymptotic {UDP} throughput under
{CHOKe}",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "82--90",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/885651.781038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A recently proposed active queue management, CHOKe,
aims to protect TCP from UDP flows. Simulations have
shown that as UDP rate increases, its bandwidth share
initially rises but eventually drops. We derive an
approximate model of CHOKe and show that, provided the
number of TCP flows is large, the UDP bandwidth share
peaks at {\em $ (e + 1)^{-1} = 0.269 $\/} when the UDP
input rate is slightly larger than the link capacity,
and drops to zero as UDP input rate tends to infinity,
regardless of the TCP algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AQM; bandwidth share; CHOKe; TCP; UDP",
}
@Article{Liu:2003:FMS,
author = "Yong Liu and Francesco {Lo Presti} and Vishal Misra
and Don Towsley and Yu Gu",
title = "Fluid models and solutions for large-scale {IP}
networks",
journal = j-SIGMETRICS,
volume = "31",
number = "1",
pages = "91--101",
month = jun,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/885651.781039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Jun 26 11:41:41 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a scalable model of a network
of Active Queue Management (AQM) routers serving a
large population of TCP flows. We present efficient
solution techniques that allow one to obtain the
transient behavior of the average queue lengths, packet
loss probabilities, and average end-to-end latencies.
We model different versions of TCP as well as different
versions of RED, the most popular AQM scheme currently
in use. Comparisons between our models and ns
simulation show our models to be quite accurate while
at the same time requiring substantially less time to
solve, especially when workloads and bandwidths are
high.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid model; large-scale IP networks; simulation",
}
@Article{Harrison:2003:GNP,
author = "P. G. Harrison",
title = "{G}-networks with propagating resets via {RCAT}",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "3--5",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959144",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stationary Markovian networks, defined by a collection
of cooperating agents, can be solved for their
equilibrium state probability distribution by a new
compositional method that computes their reversed
Markov process, under appropriate conditions. We apply
this approach to G-networks with chains of triggers and
generalised resets, which have some quite distinct
properties from the resets proposed recently.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2003:MTV,
author = "Adam Wierman and Takayuki Osogami and J{\"o}rgen
Ols{\'e}n",
title = "Modeling {TCP-Vegas} under on\slash off traffic",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "6--8",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2003:WIS,
author = "David Gamarnik and John Hasenbein",
title = "Weak instability in stochastic and fluid queueing
networks",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "9--10",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The fluid model has proven to be one of the most
effective tools for the analysis of stochastic queueing
networks, specifically for the analysis of stability.
It is known that stability of a fluid model implies
positive (Harris) recurrence (stability) of a
corresponding stochastic queueing network, and weak
stability implies rate stability of a corresponding
stochastic network. These results have been established
both for cases of specific scheduling policies and for
the class of all work conserving policies. However,
only partial converse results have been established and
in certain cases converse statements do not hold. In
this paper we close one of the existing gaps. For the
case of networks with two stations we prove that if the
fluid model is not weakly stable under the class of all
work conserving policies, then any corresponding
queueing network is not rate stable under the class of
all work conserving policies. We establish the result
by building a particular work conserving scheduling
policy which makes any corresponding stochastic process
transient. An important corollary of our result is that
the condition of the form $ \rho^* $&quot;,
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid limits; Harris recurrence; large deviations",
}
@Article{Duarte:2003:AFA,
author = "Fl{\'a}vio P. Duarte and Edmundo {de Souza e Silva}
and Don Towsley",
title = "An adaptive {FEC} algorithm using hidden {Markov}
chains",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "11--13",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andrew:2003:AOG,
author = "Lachlan L. H. Andrew and Yuliy Baryshnikov and E. G.
Coffman and Stephen V. Hanly and Jolyon White",
title = "An asymptotically optimal greedy algorithm for large
optical burst switching systems",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "14--16",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959152",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As the number of wavelengths in OBS systems increases,
the utilization achievable for a given blocking
probability can be made to approach 100\%. This paper
shows that this property applies to a wavelength
allocation algorithm of greedy type. Another property
of this rule, one shared by most other wavelength
assignment algorithms, is that, since lost traffic
tends to occur near destinations, where the resource
usage wasted by such traffic is large, very low
blocking probabilities are important for efficient
operation. To help identify regions of low blocking
probability, we derive an asymptotically exact
condition for zero blocking probabilities; it has a
form reminiscent of the stability condition of the
M/G/1 queue.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid limits; hydrodynamic limits; optical burst
switching; optical networks; stochastic modeling;
wavelength division multiplexing",
}
@Article{Marbukh:2003:TMF,
author = "Vladimir Marbukh",
title = "Towards mean field theory of wireless networks",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "17--19",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes to leverage a large body of
results on performance evaluation and optimization of
wire-line networks for obtaining relevant results for
wireless networks by using mean field approximation
based on the `effective' link capacities. We derive
mean field equations for the effective link capacities
and demonstrate how these capacities can be used for
evaluating the throughput regions as a function of the
channel model as well as transmission and routing
protocols. We also discuss possibility of using mean
field approximation for assessing the quality of
service as a function of the external demands within
the throughput region.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mean field approximation; performance; wireless
network",
}
@Article{Lam:2003:PQS,
author = "Sum Lam and Rocky K. C. Chang",
title = "Per-queue stability analysis of a dynamic quota
sharing scheme for wireless networks",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "20--22",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we consider a dynamic quota sharing
scheme to support different classes of data traffic in
wireless networks. The novelty of this scheme enables
the lower-priority classes of traffic to use what has
not been used by the higher-priority classes. We have
performed per-queue stability analysis for this scheme.
Based on the stability results, threshold values can be
appropriately determined to fulfill certain throughput
requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:2003:IPN,
author = "Richard T. B. Ma and C. M. Lee and John C. S. Lui and
David K. Y. Yau",
title = "Incentive {P2P} networks: a protocol to encourage
information sharing and contribution",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "23--25",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959157",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2003:PDR,
author = "Eitan Bachmat",
title = "On the performance of {D}-redundant storage systems",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "26--27",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959159",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "A D-redundant storage system is a system containing D
identical disks which hold data whose total capacity is
that of a single disk.\par
A simple example of a D-redundant storage system is the
D-shadowed disk system in which there are D copies of
each data element. These copies are placed at identical
locations on the different disks.\par
The existence of multiple copies can be exploited to
improve read request access time. In a shadowed system,
for example, a read request may be serviced by the disk
whose head position is closest to the copy of the
requested data. In this note we will assume for
simplicity that all requests are read requests. The
analysis of write requests has a different character
since writes may in general be serviced
asynchronously.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2003:ABM,
author = "Alma Riska and Evgenia Smirni and Gianfranco Ciardo",
title = "An aggregation-based method for the exact analysis of
a class of {GI/G/1}-type processes",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "28--30",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959161",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present an aggregation-based algorithm for the
exact analysis of Markov chains with GI/G/1-type
pattern in their repetitive structure, i.e., chains
that exhibit {\em both\/} M/G/1-type and GI/M/1-type
patterns and cannot be solved with existing techniques.
Markov chains with a GI/G/1 pattern result when
modeling open systems with faults/repairs that accept
jobs from multiple exogenous sources. Our method
provides exact computation of the steady state
probabilities, and allows computation of performance
measures of interest including the system queue length
or any of its higher moments, the exact probability of
system failures and repairs, and consequently a host of
performability measures. Our algorithm also applies to
systems that are purely of the M/G/1-type or the
GI/M/1-type, or their intersection, i.e.,
quasi-birth-death processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GI/G/1-type processes; GI/M/1-type processes;
M/G/1-type processes; Markov chains; matrix-analytic
techniques; reliability analysis; stochastic
complementation",
}
@Article{Lin:2003:HDQ,
author = "Wuqin Lin and Zhen Liu and Harry Stavropoulos and
Cathy H. Xia",
title = "Hard deadline queueing system with application to
unified messaging service",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "31--33",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a queueing system with jobs having hard
deadlines. This is motivated by recent emerging unified
messaging service applications. The service time of a
job is assumed to be known upon arrival. A job will be
lost if not being served by its deadline. For the
single-server system, we propose an on-line ED-Push
algorithm that is easy to implement and can achieve
near-optimal performance in terms of minimizing the
loss probability. Performance analyses for the
underlying M/M/1+D and G/D/1+D systems are then
provided. We also give approximation on the loss
probability for the system with multiple servers under
least workload routing scheme. The numerical results
show that ED-Push algorithm performs well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bansal:2003:AST,
author = "Nikhil Bansal",
title = "On the average sojourn time under {M/M/1/SRPT}",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "34--35",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an M/M/1 queueing system under the
Shortest Remaining Processing Time (SRPT) policy. We
show that there are constants $ c_1 $ and $ c_2 $ such
that the average sojourn time under SRPT lies between $
c_1 (\mu (1 - \rho) \log 1 / (1 - \rho))^{-1} $ and $ c_2
(\mu (1 - \rho) \log 1 / (1 - \rho))^{-1} $, where $
\mu $ denotes the service rate and $ \rho $ denotes the
load. Comparing this with the classic result that any
scheduling policy that does not use the knowledge of
job sizes has average sojourn time $ (\mu (1 -
\rho))^{-1} $, implies that SRPT offers a non-constant
improvement over such policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feng:2003:MSD,
author = "Hanhua Feng and Vishal Misra",
title = "Mixed scheduling disciplines for network flows",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "36--39",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a novel method to prove that the FBPS
discipline has optimal mean sojourn time and mean
slowdown ratio for DHR service time distributions in an
M/G/1 queue. We then discuss the problems related to
FBPS, and propose a new scheduling discipline to
overcome these problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghosh:2003:RCS,
author = "Soumyadip Ghosh and Mark S. Squillante",
title = "Revisiting correlations and scheduling in {Web}
servers",
journal = j-SIGMETRICS,
volume = "31",
number = "2",
pages = "40--42",
month = sep,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/959143.959166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:50 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Titchkosky:2003:PCD,
author = "Lance Titchkosky and Martin Arlitt and Carey
Williamson",
title = "A performance comparison of dynamic {Web}
technologies",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "2--11",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974037",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today, many Web sites dynamically generate responses
`on the fly' when user requests are received. In this
paper, we experimentally evaluate the impact of three
different dynamic content technologies (Perl, PHP, and
Java) on Web server performance. We quantify achievable
performance first for static content serving, and then
for dynamic content generation, considering cases both
with and without database access. The results show that
the overheads of dynamic content generation reduce the
peak request rate supported by a Web server up to a
factor of 8, depending on the workload characteristics
and the technologies used. In general, our results show
that Java server technologies typically outperform both
Perl and PHP for dynamic content generation, though
performance under overload conditions can be erratic
for some implementations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Dynamic Content Generation; Performance Evaluation;
Web Performance; Web Server Benchmarking",
}
@Article{Allman:2003:ELR,
author = "Mark Allman and Wesley M. Eddy and Shawn Ostermann",
title = "Estimating loss rates with {TCP}",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "12--24",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974038",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimating loss rates along a network path is a
problem that has received much attention within the
research community. However, deriving accurate
estimates of the loss rate from TCP transfers has been
largely unaddressed. In this paper, we first show that
using a simple count of the number of retransmissions
yields inaccurate estimates of the loss rate in many
cases. The mis-estimation stems from flaws in TCP's
retransmission schemes that cause the protocol to
spuriously retransmit data in a number of cases. Next,
we develop techniques for refining the retransmission
count to produce a better loss rate estimate for both
Reno and SACK variants of TCP. Finally, we explore two
SACK-based variants of TCP with an eye towards reducing
spurious retransmits, the root cause of the
mis-estimation of the loss rate. An additional benefit
of reducing the number of needless retransmits is a
reduction in the amount of shared network resources
used to accomplish no useful work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Douceur:2003:RHA,
author = "John R. Douceur",
title = "Is remote host availability governed by a universal
law?",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "25--29",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974039",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The availability of peer-to-peer and other distributed
systems depends not only on the system architecture but
also on the availability characteristics of the hosts
participating in the system. This paper constructs a
model of remote host availability, derived from
measurement studies of four host populations. It argues
that hosts are incompletely partitioned into two
behavioral classes, one in which they are cycled on/off
periodically and one in which they are nominally kept
on constantly. Within a class, logarithmic availability
generally follows a uniform distribution; however, the
underlying reason for this is not readily apparent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brebner:2003:JIS,
author = "Paul Brebner and Jeffrey Gosper",
title = "{J2EE} infrastructure scalability and throughput
estimation",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "30--36",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974040",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "ECperf, the widely recognized industry standard J2EE
benchmark, has attracted a large number of results
submissions and their subsequent publication. However,
ECperf places little restriction on the hardware
platform, operating systems and databases utilized in
the benchmarking process. This, combined with the
existence of only two primary metrics, makes it
difficult to accurately compare the results, or the
performance of the Application Server products
themselves. By mining the full-disclosure archives for
trends and correlations we have discovered that J2EE
technology is very scalable with increasing middle-tier
resources, as long as the database has sufficient
resources to prevent it becoming a bottleneck. Other
observed trends include, a linear correlation between
middle-tier total processing power and throughput, as
well as between J2EE Application Server license costs
and throughput. However, the results clearly indicate
that there is an increasing cost per user with
increasing capacity systems. Finally, the correlation
between middle-tier processing power and throughput,
combined with results obtained from a different
`lighter-weight' benchmark, facilitates an estimate of
throughput for different types of J2EE applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ECperf benchmark; Enterprise Java Beans (EJB); J2EE;
scalability; throughput",
}
@Article{Cui:2003:NHA,
author =       "Jike Cui and Mansur H. Samadzadeh",
title = "A new hybrid approach to exploit localities: {LRFU}
with adaptive prefetching",
journal = j-SIGMETRICS,
volume = "31",
number = "3",
pages = "37--43",
month = dec,
year = "2003",
CODEN = "????",
DOI = "https://doi.org/10.1145/974036.974041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:51 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper reviewed a number of existing methods to
exploit the spatial and temporal locality commonly
existing in programs, and provided detailed analysis
and testing of adaptive prefetching (a method designed
to utilize spatial locality) and the least recently and
frequently used (LRFU) method (a method designed to
utilize temporal locality). The two methods were
combined in this work in terms of their exploitation of
locality. The comparative studies of the methods were
done using real traces, and hit rate was used as an
evaluation measure. Results showed that by using
adaptive prefetching, the hit rate improved
significantly by an average of 11.7\% over the hit rate
of LRU in the traces and cache configurations used. It
also showed that LRFU consistently gives higher hit
rates than LRU, but not by much in the trace files and
cache configurations tested. And the X value (a
controllable parameter which determines the weights
given to recency and frequency) has to be in a certain
range, which is usually narrow, in order to get the
best performance for hit rate. Compared to adaptive
prefetching and LRU, the hybrid approach of combining
adaptive prefetching and LRFU gave a consistently
higher hit rate also. But, affected by the performance
of LRFU, the improvement in the hit rate by the
combination was low.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burger:2004:TCA,
author = "Doug Burger and Anand Sivasubramaniam",
title = "Tools for computer architecture research",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "2--3",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054908",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Simulators are critical tools for computer
architecture research and education. They are
invaluable when evaluating hardware designs and
enhancements, that would otherwise be prohibitively
expensive to prototype in practice. Simulators can be
useful vehicles for verifying the validity of initial
designs, understanding their cost-benefit trade-offs,
whether or not a more expensive and time-consuming
hardware prototyping effort is undertaken. In addition
to being the sole vehicle for conducting an
investigation in different research organizations,
simulators are extensively used in industry for
validating new ideas before justifying further
investments on those ideas. Further, simulators can
also serve as excellent platforms for teaching
difficult concepts in hardware and compilers, by
allowing students hands-on access to hardware and
software internals that may not be accessible
otherwise.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burger:2004:RES,
author = "Doug Burger and Todd M. Austin and Stephen W.
Keckler",
title = "Recent extensions to the {SimpleScalar Tool} suite",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "4--7",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the past eight years, the SimpleScalar Tool suite
has become the most widely used set of simulation tools
in the computer architecture research community. The
authors have recently completed an NSF-funded project
to extend and improve the SimpleScalar tools. In this
paper, we describe the extensions and improvements to
the tools, which include the capability to simulate
more instruction sets, graphical support for
performance viewing, and more simulators that model
different types of machines, including embedded
systems, ISA-specific systems, systems with operating
system, and multiprocessing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bohrer:2004:MFS,
author = "Patrick Bohrer and James Peterson and Mootaz Elnozahy
and Ram Rajamony and Ahmed Gheith and Ron Rockhold and
Charles Lefurgy and Hazim Shafi and Tarun Nakra and
Rick Simpson and Evan Speight and Kartik Sudeep and
Eric {Van Hensbergen} and Lixin Zhang",
title = "{Mambo}: a full system simulator for the {PowerPC}
architecture",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "8--12",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mambo is a full-system simulator for modeling
PowerPC-based systems. It provides building blocks for
creating simulators that range from purely functional
to timing-accurate. Functional versions support fast
emulation of individual PowerPC instructions and the
devices necessary for executing operating systems.
Timing-accurate versions add the ability to account for
device timing delays, and support the modeling of the
PowerPC processor microarchitecture. We describe our
experience in implementing the simulator and its uses
within IBM to model future systems, support early
software development, and design new system software.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brooks:2004:PPS,
author = "David Brooks and Pradip Bose and Margaret Martonosi",
title = "Power-performance simulation: design and validation
strategies",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "13--18",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054911",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Microprocessor research and development increasingly
relies on detailed simulations to make design choices.
As such, the structure, speed, and accuracy of
microarchitectural simulators is of critical importance
to the field. This paper describes our experiences in
building two simulators, using related but distinct
approaches. One of the most important attributes of a
simulator is its ability to accurately convey design
trends as different aspects of the microarchitecture
are varied. In this work, we break down accuracy---a
broad term--- into two sub-types: {\em relative\/} and
{\em absolute\/} accuracy. We then discuss typical
abstraction errors in power-performance simulators and
show when they do (or do not) affect the design rule
choices a user of those simulators might make. By
performing this validation study using the Wattch and
PowerTimer simulators, the work addresses validation
issues both broadly and in the specific case of a
fairly widely-used simulator.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vachharajani:2004:LSE,
author = "Manish Vachharajani and Neil Vachharajani and David A.
Penry and Jason A. Blome and David I. August",
title = "The {Liberty Simulation Environment}, version 1.0",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "19--24",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "High-level hardware modeling via simulation is an
essential step in hardware systems design and research.
Despite the importance of simulation, current model
creation methods are error prone and are unnecessarily
time consuming. To address these problems, we have
publicly released the Liberty Simulation Environment
(LSE), Version 1.0, consisting of a simulator builder
and automatic visualizer based on a shared hardware
description language. LSE's design was motivated by a
careful analysis of the strengths and weaknesses of
existing systems. This has resulted in a system in
which models are easier to understand, faster to
develop, and have performance on par with other
systems. LSE is capable of modeling {\em any\/}
synchronous hardware system. To date, LSE has been used
to simulate and convey ideas about a diverse set of
complex systems including a chip multiprocessor
out-of-order IA-64 machine and a multiprocessor system
with detailed device models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hamerly:2004:HUS,
author = "Greg Hamerly and Erez Perelman and Brad Calder",
title = "How to use {SimPoint} to pick simulation points",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "25--30",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054913",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Understanding the cycle level behavior of a processor
running an application is crucial to modern computer
architecture research. To gain this understanding,
detailed cycle level simulators are typically employed.
Unfortunately, this level of detail comes at the cost
of speed, and simulating the full execution of an
industry standard benchmark on even the fastest
simulator can take weeks to months to complete. This
fact has not gone unnoticed, and several techniques
have been developed aimed at reducing simulation
time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hardavellas:2004:SFA,
author = "Nikolaos Hardavellas and Stephen Somogyi and Thomas F.
Wenisch and Roland E. Wunderlich and Shelley Chen and
Jangwoo Kim and Babak Falsafi and James C. Hoe and
Andreas G. Nowatzyk",
title = "{SimFlex}: a fast, accurate, flexible full-system
simulation framework for performance evaluation of
server architecture",
journal = j-SIGMETRICS,
volume = "31",
number = "4",
pages = "31--34",
month = mar,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1054907.1054914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:20:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The new focus on commercial workloads in simulation
studies of server systems has caused a drastic increase
in the complexity and decrease in the speed of
simulation tools. The complexity of a large-scale
full-system model makes development of a monolithic
simulation tool a prohibitively difficult task.
Furthermore, detailed full-system models simulate so
slowly that experimental results must be based on
simulations of only fractions of a second of execution
of the modelled system. This paper presents SIMFLEX, a
simulation framework which uses component-based design
and rigorous statistical sampling to enable development
of complex models and ensure representative measurement
results with fast simulation turnaround. The novelty of
SIMFLEX lies in its combination of a unique,
compile-time approach to component interconnection and
a methodology for obtaining accurate results from
sampled simulations on a platform capable of evaluating
unmodified commercial workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:2004:STE,
author = "Debasis Mitra",
title = "Stochastic traffic engineering for demand uncertainty
and risk-aware network revenue management",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "1--1",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005687",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a
stochastic traffic engineering framework for optimizing
bandwidth provisioning and route selection in networks.
Traffic demands are uncertain and specified by
probability distributions, and the objective is to
maximize a risk-adjusted measure of network revenue
that is generated by serving demands. Considerable
attention is given to the appropriate measure of risk
in the network model. We also advance risk-mitigation
strategies. The optimization model, which is based on
mean-risk analysis, enables a service provider to
maximize a combined measure of mean revenue and revenue
risk. The framework is intended for off-line traffic
engineering, which takes a centralized view of network
topology, link capacity and demand. We obtain
conditions under which the optimization problem is an
instance of convex programming. We study the properties
of the solution and show that it asymptotically meets
the stochastic efficiency criterion. In our numerical
investigations we illustrate the impact of demand
uncertainty on various aspects of the optimally traffic
engineered solutions. The service provider's tolerance
to risk is shown to have a strong influence on the
traffic engineering and revenue management decisions.
We develop the efficient frontier, which is the set of
Pareto optimal pairs of mean revenue and revenue risk,
to aid the service provider in selecting its operating
point.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marin:2004:CAP,
author = "Gabriel Marin and John Mellor-Crummey",
title = "Cross-architecture performance predictions for
scientific applications using parameterized models",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "2--13",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005691",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a toolkit for semi-automatically
measuring and modeling static and dynamic
characteristics of applications in an
architecture-neutral fashion. For predictable
applications, models of dynamic characteristics have a
convex and differentiable profile. Our toolkit operates
on application binaries and succeeds in modeling key
application characteristics that determine program
performance. We use these characterizations to explore
the interactions between an application and a target
architecture. We apply our toolkit to SPARC binaries to
develop architecture-neutral models of computation and
memory access patterns of the ASCI Sweep3D and the NAS
SP, BT and LU benchmarks. From our models, we predict
the L1, L2 and TLB cache miss counts as well as the
overall execution time of these applications on an
Origin 2000 system. We evaluate our predictions by
comparing them against measurements collected using
hardware performance counters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "modeling; performance analysis; prediction",
}
@Article{Huang:2004:MDS,
author = "Lan Huang and Gang Peng and Tzi-cker Chiueh",
title = "Multi-dimensional storage virtualization",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "14--24",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Most state-of-the-art commercial storage
virtualization systems focus only on one particular
storage attribute, capacity. This paper describes the
design, implementation and evaluation of a {\em
multi-dimensional storage virtualization\/} system
called Stonehenge, which is able to virtualize a
cluster-based physical storage system along multiple
dimensions, including bandwidth, capacity, and latency.
As a result, Stonehenge is able to multiplex multiple
virtual disks, each with a distinct bandwidth,
capacity, and latency attribute, on a single physical
storage system as if they are separate physical disks.
A key enabling technology for Stonehenge is an
efficiency-aware real-time disk scheduling algorithm
called dual-queue disk scheduling, which maximizes disk
utilization efficiency while providing Quality of
Service (QoS) guarantees. To optimize disk utilization
efficiency, Stonehenge exploits run-time measurements
extensively, for admission control, computing
latency-derived bandwidth requirement, and predicting
disk service time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "quality of service; storage virtualization",
}
@Article{Blackburn:2004:MRP,
author = "Stephen M. Blackburn and Perry Cheng and Kathryn S.
McKinley",
title = "Myths and realities: the performance impact of garbage
collection",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "25--36",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper explores and quantifies garbage collection
behavior for three whole heap collectors and
generational counterparts: {\em copying semi-space,
mark-sweep,\/} and {\em reference counting}, the
canonical algorithms from which essentially all other
collection algorithms are derived. Efficient
implementations in MMTk, a Java memory management
toolkit, in IBM's Jikes RVM share all common mechanisms
to provide a clean experimental platform.
Instrumentation separates collector and program
behavior, and performance counters measure timing and
memory behavior on three architectures. Our
experimental design reveals key algorithmic features
and how they match program characteristics to explain
the direct and indirect costs of garbage collection as
a function of heap size on the SPEC JVM benchmarks. For
example, we find that the contiguous allocation of
copying collectors attains significant locality
benefits over free-list allocators. The reduced
collection costs of the generational algorithms
together with the locality benefit of contiguous
allocation motivates a copying {\em nursery\/} for
newly allocated objects. These benefits dominate the
overheads of generational collectors compared with
non-generational and no collection, disputing the myth
that `no garbage collection is good garbage
collection.' Performance is less sensitive to the
mature space collection algorithm in our benchmarks.
However the locality and pointer mutation
characteristics for a given program occasionally prefer
copying or mark-sweep. This study is unique in its
breadth of garbage collection algorithms and its depth
of analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "generational; java; mark-sweep; reference counting;
semi-space",
}
@Article{Jin:2004:IPS,
author = "Wei Jin and Jeffrey S. Chase and Jasleen Kaur",
title = "Interposed proportional sharing for a storage service
utility",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "37--48",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005694",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops and evaluates new share-based
scheduling algorithms for differentiated service
quality in network services, such as network storage
servers. This form of resource control makes it
possible to share a server among multiple request flows
with probabilistic assurance that each flow receives a
specified minimum share of a server's capacity to serve
requests. This assurance is important for safe
outsourcing of services to shared utilities such as
Storage Service Providers. Our approach interposes
share-based request dispatching on the network path
between the server and its clients. Two new scheduling
algorithms are designed to run within an intermediary
(e.g., a network switch), where they enforce fair
sharing by throttling request flows and reordering
requests; these algorithms are adaptations of
Start-time Fair Queuing (SFQ) for servers with a
configurable degree of internal concurrency. A third
algorithm, Request Windows (RW), bounds the outstanding
requests for each flow independently; it is amenable to
a decentralized implementation, but may restrict
concurrency under light load. The analysis and
experimental results show that these new algorithms can
enforce shares effectively when the shares are not
saturated, and that they provide acceptable performance
isolation under saturation. Although the evaluation
uses a storage service as an example, interposed
request scheduling is non-intrusive and views the
server as a black box, so it is useful for complex
services with no internal support for differentiated
service quality.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "differentiated service; fair sharing; multiprocessor
scheduling; performance isolation; proportional
sharing; quality of service; storage services; utility
computing; weighted fair queuing",
}
@Article{Soule:2004:FCH,
author = "Augustin Soule and Kav{\'e} Salamatian and Nina Taft
and Richard Emilion and Konstantina Papagiannaki",
title = "Flow classification by histograms: or how to go on
safari in the {Internet}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "49--60",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In order to control and manage highly aggregated
Internet traffic flows efficiently, we need to be able
to categorize flows into distinct classes and to be
knowledgeable about the different behavior of flows
belonging to these classes. In this paper we consider
the problem of classifying BGP level prefix flows into
a small set of homogeneous classes. We argue that using
the entire distributional properties of flows can have
significant benefits in terms of quality in the derived
classification. We propose a method based on modeling
flow histograms using Dirichlet Mixture Processes for
random distributions. We present an inference procedure
based on the Simulated Annealing Expectation
Maximization algorithm that estimates all the model
parameters as well as flow {\em membership
probabilities\/} --- the probability that a flow
belongs to any given class. One of our key
contributions is a new method for Internet flow
classification. We show that our method is powerful in
that it is capable of examining macroscopic flows while
simultaneously making fine distinctions between
different traffic classes. We demonstrate that our
scheme can address issues with flows being close to
class boundaries and the inherent dynamic behaviour of
Internet flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flow classification; Internet traffic; parameter
estimation",
}
@Article{Lakhina:2004:SAN,
author = "Anukool Lakhina and Konstantina Papagiannaki and Mark
Crovella and Christophe Diot and Eric D. Kolaczyk and
Nina Taft",
title = "Structural analysis of network traffic flows",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "61--72",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network traffic arises from the superposition of
Origin-Destination (OD) flows. Hence, a thorough
understanding of OD flows is essential for modeling
network traffic, and for addressing a wide variety of
problems including traffic engineering, traffic matrix
estimation, capacity planning, forecasting and anomaly
detection. However, to date, OD flows have not been
closely studied, and there is very little known about
their properties. We present the first analysis of
complete sets of OD flow time-series, taken from two
different backbone networks (Abilene and
Sprint-Europe). Using Principal Component Analysis
(PCA), we find that the set of OD flows has small
intrinsic dimension. In fact, even in a network with
over a hundred OD flows, these flows can be accurately
modeled in time using a small number (10 or less) of
independent components or dimensions. We also show how
to use PCA to systematically decompose the structure of
OD flow time-series into three main constituents: common
periodic trends, short-lived bursts, and noise. We
provide insight into how the various constituents
contribute to the overall structure of OD flows and
explore the extent to which this decomposition varies
over time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network traffic analysis; principal component
analysis; traffic engineering",
}
@Article{Soule:2004:HIE,
author = "Augustin Soule and Antonio Nucci and Rene Cruz and
Emilio Leonardi and Nina Taft",
title = "How to identify and estimate the largest traffic
matrix elements in a dynamic environment",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "73--84",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we investigate a new idea for traffic
matrix estimation that makes the basic problem less
under-constrained, by deliberately changing the routing
to obtain additional measurements. Because all these
measurements are collected over disparate time
intervals, we need to establish models for each
Origin-Destination (OD) pair to capture the complex
behaviours of Internet traffic. We model each OD pair
with two components: the diurnal pattern and the
fluctuation process. We provide models that incorporate
the two components above, to estimate both the first
and second order moments of traffic matrices. We do
this for both stationary and cyclo-stationary traffic
scenarios. We formalize the problem of estimating the
second order moment in a way that is completely
independent from the first order moment. Moreover, we
can estimate the second order moment without needing
any routing changes (i.e., without explicit changes to
IGP link weights). We prove for the first time, that
such a result holds for any realistic topology under
the assumption of {\em minimum cost routing\/} and {\em
strictly positive link weights}. We highlight how the
second order moment helps the identification of the top
largest OD flows carrying the most significant fraction
of network traffic. We then propose a refined
methodology consisting of using our variance estimator
(without routing changes) to identify the top largest
flows, and estimate only these flows. The benefit of
this method is that it dramatically reduces the number
of routing changes needed. We validate the
effectiveness of our methodology and the intuitions
behind it by using real aggregated sampled netflow data
collected from a commercial Tier-1 backbone.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network tomography; traffic matrix estimation",
}
@Article{Duffield:2004:FSU,
author = "Nick Duffield and Carsten Lund and Mikkel Thorup",
title = "Flow sampling under hard resource constraints",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "85--96",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many network management applications use as their data
traffic volumes differentiated by attributes such as IP
address or port number. IP flow records are commonly
collected for this purpose: these enable determination
of fine-grained usage of network resources. However,
the increasingly large volumes of flow statistics incur
concomitant costs in the resources of the measurement
infrastructure. This motivates sampling of flow
records. This paper addresses sampling strategy for
flow records. Recent work has shown that non-uniform
sampling is necessary in order to control estimation
variance arising from the observed heavy-tailed
distribution of flow lengths. However, while this
approach controls estimator variance, it does not place
hard limits on the number of flows sampled. Such limits
are often required during arbitrary downstream
sampling, resampling and aggregation operations
employed in analysis of the data. This paper proposes a
correlated sampling strategy that is able to select an
arbitrarily small number of the `best' representatives
of a set of flows. We show that usage estimates arising
from such selection are unbiased, and show how to
estimate their variance, both offline for modeling
purposes, and online during the sampling itself. The
selection algorithm can be implemented in a queue-like
data structure in which memory usage is uniformly
bounded during measurement. Finally, we compare the
complexity and performance of our scheme with other
potential approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "IP flows; sampling; variance reduction",
}
@Article{Aalto:2004:TLP,
author = "Samuli Aalto and Urtzi Ayesta and Eeva
Nyberg-Oksanen",
title = "Two-level processor-sharing scheduling disciplines:
mean delay analysis",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "97--105",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005701",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Inspired by several recent papers that focus on
scheduling disciplines for network flows, we present a
mean delay analysis of Multilevel Processor Sharing
(MLPS) scheduling disciplines in the context of M/G/1
queues. Such disciplines have been proposed to model
the effect of the differentiation between short and
long TCP flows in the Internet. Under MLPS, jobs are
classified into classes depending on their attained
service. We consider scheduling disciplines where jobs
within the same class are served either with Processor
Sharing (PS) or Foreground Background (FB) policy, and
the class that contains jobs with the smallest attained
service is served first. It is known that the FB policy
minimizes (maximizes) the mean delay when the hazard
rate of the job size distribution is decreasing
(increasing). Our analysis, based on pathwise and
meanwise arguments of the unfinished truncated work,
shows that Two-Level Processor Sharing (TLPS)
disciplines, e.g., FB+PS and PS+PS, are better than PS
scheduling when the hazard rate of the job size
distribution is decreasing. If the hazard rate is
increasing and bounded, we show that PS outperforms
PS+PS and FB+PS. We further extend our analysis to
study local optimality within a level of an MLPS
scheduling discipline.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FB; LAS; M/G/1; mean delay; MLPS; PS; scheduling;
unfinished truncated work",
}
@Article{Rai:2004:PAB,
author = "Idris A. Rai and Guillaume Urvoy-Keller and Mary K.
Vernon and Ernst W. Biersack",
title = "Performance analysis of {LAS}-based scheduling
disciplines in a packet switched network",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "106--117",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Least Attained Service (LAS) scheduling policy,
when used for scheduling packets over the bottleneck
link of an Internet path, can greatly reduce the
average flow time for short flows while not
significantly increasing the average flow time for the
long flows that share the same bottleneck. No
modification of the packet headers is required to
implement the simple LAS policy. However, previous work
has also shown that a drawback of the LAS scheduler is
that, when link utilization is greater than 70\%, long
flows experience large jitter in their packet transfer
times as compared to the conventional
First-Come-First-Serve (FCFS) link scheduling. This
paper proposes and evaluates new differentiated LAS
scheduling policies that reduce the jitter for long
flows that are identified as `priority' flows. To
evaluate the new policies, we develop analytic models
to estimate average flow transfer time as a function of
flow size, and average packet transmission time as a
function of position in the flow, for the
single-bottleneck `dumbbell topology' used in many ns
simulation studies. Models are developed for FCFS
scheduling, LAS scheduling, and each of the new
differentiated LAS scheduling policies at the
bottleneck link. Over a wide range of configurations,
the analytic estimates agree very closely with the ns
estimates. Thus, the analytic models can be used
instead of simulation for comparing the policies with
respect to mean flow transfer time (as a function of
flow size) and mean packet transfer time. Furthermore,
an initial discrepancy between the analytic and
simulation estimates revealed errors in the parameter
values that are often specified in the widely used ns
Web workload generator. We develop an improved Web
workload specification, which is used to estimate the
packet jitter for long flows (more accurately than with
                 previous simulation workloads). Results for the
scheduling policies show that a particular policy,
LAS-log, greatly improves the mean flow transfer time
for priority long flows while providing performance
similar to LAS for the ordinary flows. Simulations show
that the LAS-log policy also greatly reduces the jitter
in packet delivery times for the priority flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FCFS and LAS models; LAS-based scheduling and models;
models validation; scheduling; service differentiation;
simulations",
}
@Article{Key:2004:ELP,
author = "Peter Key and Laurent Massouli{\'e} and Bing Wang",
title = "Emulating low-priority transport at the application
layer: a background transfer service",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "118--129",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Low priority data transfer across the wide area is
useful in several contexts, for example for the
dissemination of large files such as OS updates,
content distribution or prefetching. Although the
design of such a service is reasonably easy when the
underlying network supports service differentiation, it
becomes more challenging without such network support.
We describe an application level approach to designing
a low priority service --- one that is `lower than
best-effort' in the context of the current Internet. We
require neither network support nor changes to TCP.
Instead, we use a receive window control to limit the
transfer rate of the application, and the optimal rate
is determined by detecting a change-point. We motivate
this joint control-estimation problem by considering a
fluid-based optimisation framework, and describe
practical solutions, based on stochastic approximation
and binary search techniques. Simulation results
demonstrate the effectiveness of the approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "application reaction; background transfer; binary
search; low priority; stochastic approximation",
}
@Article{Raz:2004:RAQ,
author = "David Raz and Hanoch Levy and Benjamin Avi-Itzhak",
title = "A resource-allocation queueing fairness measure",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "130--141",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005704",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fairness is a major issue in the operation of queues,
perhaps it is the reason why queues were formed in the
first place. Recent studies show that the fairness of a
queueing system is important to customers not less than
the actual delay they experience. Despite this
observation little research has been conducted to study
fairness in queues, and no commonly agreed upon measure
of queue fairness exists. Two recent research
exceptions are Avi-Itzhak and Levy [1], where a
fairness measure is proposed, and Wierman and
Harchol-Balter [18] (this conference, 2003), where a
{\em criterion\/} is proposed for classifying service
policies as fair or unfair; the criterion focuses on
customer service requirement and deals with fairness
with respect to service times. In this work we
recognize that the inherent behavior of a queueing
system is governed by two major factors: Job {\em
seniority\/} (arrival times) and job {\em service
requirement\/} (service time). Thus, it is desired that
a queueing fairness measure would account for both. To
this end we propose a Resource Allocation Queueing
Fairness Measure, (RAQFM), that accounts for both
relative job seniority and relative service time. The
measure allows accounting for individual job
discrimination as well as system unfairness. The system
measure forms a full scale that can be used to evaluate
the level of unfairness under various queueing
disciplines. We present several basic properties of the
measure. We derive the individual measure as well as
the system measure for an M/M/1 queue under five
fundamental service policies: Processor Sharing (PS),
First Come First Served (FCFS), Non-Preemptive Last
Come First Served (NP-LCFS), Preemptive Last Come First
Served (P-LCFS), and Random Order of Service (ROS). The
results of RAQFM are then compared to those of Wierman
and Harchol-Balter [18], and the quite intriguing
observed differences are discussed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; FCFS; job scheduling; M/M/1; processor
sharing; PS; queue disciplines; resource allocation;
unfairness",
}
@Article{Paxson:2004:MA,
author = "Vern Paxson",
title = "Measuring adversaries",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "142--142",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005688",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many concepts and techniques developed for general
Internet measurement have counterparts in the domain of
detecting and analyzing network attacks. The task is
greatly complicated, however, by the fact that the
object of study is {\em adversarial\/}: attackers do
not wish to be `measured' and will take steps to thwart
observation. We look at the far-ranging consequences of
this different measurement environment: the analysis
                 difficulties --- some fundamental --- that arise due to subtle
ambiguities in the true semantics of observed traffic;
new notions of `active measurement'; the highly
challenging task of rapidly characterizing
Internet-scale phenomena such as global worm pandemics;
the need for detailed application-level analysis and
related policy and legal difficulties; attacks that
target passive analysis tools; and the inherent `arms
race' nature of the undertaking.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2004:FSF,
author = "Hwangnam Kim and Jennifer C. Hou",
title = "A fast simulation framework for {IEEE 802.11}-operated
wireless {LANs}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "143--154",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005706",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we develop a fast simulation framework
for IEEE 802.11-operated wireless LANs (WLANs), in
which a large number of packets are abstracted as a
single fluid chunk, and their behaviors are
approximated with analytic fluid models and figured
into the simulation. We first derive the analytical
model that characterizes data transmission activities
in IEEE 802.11-operated WLANs with/without the RTS/CTS
mechanism. All the control overhead incurred in the
physical and MAC layers, as well as system parameters
specified in IEEE 802.11 [12] are faithfully figured
in. We validate the model with simulation in cases in
which the network is and is not saturated. We then
implement, with the use of the time stepping technique
[21], the fast simulation framework for WLANs in {\em
ns-2\/} [2], and conduct a comprehensive simulation
study to evaluate the framework in terms of speed-up
and errors incurred under a variety of network
configurations. The simulation results indicate that
the proposed framework is indeed effective in
simulating IEEE 802.11-operated WLANs. It achieves as
much as two orders of magnitude improvement in terms of
execution time as compared to packet-level simulation.
The performance improvement is more pronounced when the
number of wireless nodes, the number of applications
running on each wireless node, or the number of WLANs
increases. The relative error, on the other hand, falls
within 2\% in all cases, as long as the value of the
time step is appropriately determined.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fast simulation; IEEE 802.11; throughput analysis;
wireless LANs",
}
@Article{Hao:2004:ARF,
author = "Fang Hao and Murali Kodialam and T. V. Lakshman",
title = "{ACCEL-RATE}: a faster mechanism for memory efficient
per-flow traffic estimation",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "155--166",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005707",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Per-flow network traffic measurement is an important
component of network traffic management, network
performance assessment, and detection of anomalous
network events such as incipient DoS attacks. In [1],
the authors developed a mechanism called RATE where the
focus was on developing a memory efficient scheme for
estimating per-flow traffic rates to a specified level
of accuracy. The time taken by RATE to estimate the
per-flow rates is a function of the specified
estimation accuracy and this time is acceptable for
several applications. However some applications, such
as quickly detecting worm related activity or the
tracking of transient traffic, demand faster estimation
times. The main contribution of this paper is a new
scheme called ACCEL-RATE that, for a specified level of
accuracy, can achieve orders of magnitude decrease in
per-flow rate estimation times. It achieves this by
using a hashing scheme to split the incoming traffic
into several sub-streams, estimating the per-flow
traffic rates in each of the substreams and then
relating it back to the original per-flow traffic
rates. We show both theoretically and experimentally
that the estimation time of ACCEL-RATE is at least one
to two orders of magnitude lower than RATE without any
significant increase in the memory size.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burtscher:2004:VFE,
author = "Martin Burtscher",
title = "{VPC3}: a fast and effective trace-compression
algorithm",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "167--176",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005708",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Trace files are widely used in research and academia
to study the behavior of programs. They are simple to
process and guarantee repeatability. Unfortunately,
they tend to be very large. This paper describes {\em
vpc3}, a fundamentally new approach to compressing
program traces. {\em Vpc3\/} employs value predictors
to bring out and amplify patterns in the traces so that
conventional compressors can compress them more
effectively. In fact, our approach not only results in
much higher compression rates but also provides faster
compression and decompression. For example, compared to
{\em bzip2}, {\em vpc3\/}'s geometric mean compression
rate on SPECcpu2000 store address traces is 18.4 times
higher, compression is ten times faster, and
decompression is three times faster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "predictor-based compression; trace compression; trace
files",
}
@Article{Kumar:2004:DSA,
author = "Abhishek Kumar and Minho Sung and Jun (Jim) Xu and Jia
Wang",
title = "Data streaming algorithms for efficient and accurate
estimation of flow size distribution",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "177--188",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005709",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Knowing the distribution of the sizes of traffic flows
passing through a network link helps a network operator
to characterize network resource usage, infer traffic
demands, detect traffic anomalies, and accommodate new
traffic demands through better traffic engineering.
Previous work on estimating the flow size distribution
has been focused on making inferences from sampled
network traffic. Its accuracy is limited by the
(typically) low sampling rate required to make the
sampling operation affordable. In this paper we present
a novel data streaming algorithm to provide much more
accurate estimates of flow distribution, using a `lossy
data structure' which consists of an array of counters
fitted well into SRAM. For each incoming packet, our
algorithm only needs to increment one underlying
counter, making the algorithm fast enough even for 40
Gbps (OC-768) links. The data structure is lossy in the
sense that sizes of multiple flows may collide into the
same counter. Our algorithm uses Bayesian statistical
methods such as Expectation Maximization to infer the
most likely flow size distribution that results in the
observed counter values after collision. Evaluations of
this algorithm on large Internet traces obtained from
several sources (including a tier-1 ISP) demonstrate
that it has very high measurement accuracy (within
2\%). Our algorithm not only dramatically improves the
accuracy of flow distribution measurement, but also
contributes to the field of data streaming by
formalizing an existing methodology and applying it to
the context of estimating the flow-distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; network measurement; statistical
inference; traffic analysis",
}
@Article{Ma:2004:GTA,
author = "Richard T. B. Ma and Sam C. M. Lee and John C. S. Lui
and David K. Y. Yau",
title = "A game theoretic approach to provide incentive and
service differentiation in {P2P} networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "189--198",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005711",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional peer-to-peer (P2P) networks do not provide
service differentiation and incentive for users.
Consequently, users can obtain services without
themselves contributing any information or service to a
P2P community. This leads to the `free-riding' and
`tragedy of the commons' problems, in which the
majority of information requests are directed towards a
small number of P2P nodes willing to share their
resources. The objective of this work is to enable
service differentiation in a P2P network based on the
amount of services each node has provided to its
community, thereby encouraging all network nodes to
share resources. We first introduce a resource
distribution mechanism between all information sharing
nodes. The mechanism is driven by a distributed
algorithm which has linear time complexity and
guarantees Pareto-optimal resource allocation. Besides
giving incentive, the mechanism distributes resources
in a way that increases the aggregate utility of the
whole network. Second, we model the whole resource
request and distribution process as a competition game
between the competing nodes. We show that this game has
a Nash equilibrium and is collusion-proof. To realize
the game, we propose a protocol in which all competing
nodes interact with the information providing node to
reach Nash equilibrium in a dynamic and efficient
manner. Experimental results are reported to illustrate
that the protocol achieves its service differentiation
objective and can induce productive information sharing
by rational network nodes. Finally, we show that our
protocol can properly adapt to different node arrival
and departure events, and to different forms of network
congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:2004:FRS,
author = "Simon S. Lam and Huaiyu Liu",
title = "Failure recovery for structured {P2P} networks:
protocol design and performance evaluation",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "199--210",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005712",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement studies indicate a high rate of node
dynamics in p2p systems. In this paper, we address the
question of how high a rate of node dynamics can be
supported by {\em structured\/} p2p networks. We
confine our study to the hypercube routing scheme used
by several structured p2p systems. To improve system
robustness and facilitate failure recovery, we
introduce the property of $K$-{\em consistency}, $ K
\geq 1$, which generalizes consistency defined
previously. (Consistency guarantees connectivity from
any node to any other node.) We design and evaluate a
failure recovery protocol based upon local information
for $K$-consistent networks. The failure recovery
protocol is then integrated with a join protocol that
has been proved to construct $K$-consistent neighbor
tables for concurrent joins. The integrated protocols
were evaluated by a set of simulation experiments in
which nodes joined a 2000-node network and nodes (both
old and new) were randomly selected to fail
concurrently over 10,000 seconds of simulated time. In
each such `churn' experiment, we took a `snapshot' of
neighbor tables in the network once every 50 seconds
and evaluated connectivity and consistency measures
over time as a function of the churn rate, timeout
value in failure recovery, and $K$. Storage and
communication overheads were also evaluated. We found
our protocols to be effective, efficient, and stable
for an average node lifetime as low as 8.3 minutes (the
median lifetime measured for Napster and Gnutella was
60 minutes [10]).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "failure recovery; hypercube routing; k-consistency;
peer-to-peer networks; sustainable churn rate",
}
@Article{Wang:2004:ZBP,
author = "Xiaoming Wang and Yueping Zhang and Xiafeng Li and
Dmitri Loguinov",
title = "On zone-balancing of peer-to-peer networks: analysis
of random node join",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "211--222",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005713",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Balancing peer-to-peer graphs, including zone-size
distributions, has recently become an important topic
of peer-to-peer (P2P) research [1], [2], [6], [19],
[31], [36]. To bring analytical understanding into the
various peer-join mechanisms, we study how
zone-balancing decisions made during the initial
sampling of the peer space affect the resulting zone
sizes and derive several asymptotic results for the
maximum and minimum zone sizes that hold with high
probability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "balls-into-bins; load-balancing; modeling;
peer-to-peer",
}
@Article{Kansal:2004:PAT,
author = "Aman Kansal and Dunny Potter and Mani B. Srivastava",
title = "Performance aware tasking for environmentally powered
sensor networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "223--234",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005714",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The use of environmental energy is now emerging as a
feasible energy source for embedded and wireless
computing systems such as sensor networks where manual
recharging or replacement of batteries is not
practical. However, energy supply from environmental
sources is highly variable with time. Further, for a
distributed system, the energy available at its various
locations will be different. These variations strongly
influence the way in which environmental energy is
used. We present a harvesting theory for determining
performance in such systems. First we present a model
for characterizing environmental sources. Second, we
state and prove two harvesting theorems that help
determine the sustainable performance level from a
particular source. This theory leads to practical
techniques for scheduling processes in energy
harvesting systems. Third, we present our
implementation of a real embedded system that runs on
solar energy and uses our harvesting techniques. The
system adjusts its performance level in response to
available resources. Fourth, we propose a localized
algorithm for increasing the performance of a
distributed system by adapting the process scheduling
to the spatio-temporal characteristics of the
environmental energy in the distributed system. While
our theoretical intuition is based on certain
abstractions, all the scheduling methods we present are
motivated solely from the experimental behavior and
resource constraints of practical sensor networking
systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy harvesting; performance guarantees; process
scheduling",
}
@Article{Bonald:2004:PBI,
author = "Thomas Bonald and Alexandre Prouti{\`e}re",
title = "On performance bounds for the integration of elastic
and adaptive streaming flows",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "235--245",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a network model where bandwidth is fairly
shared by a dynamic number of elastic and adaptive
streaming flows. Elastic flows correspond to data
transfers while adaptive streaming flows correspond to
audio/video applications with variable rate codecs. In
particular, the former are characterized by a fixed
size (in bits) while the latter are characterized by a
fixed duration. This flow-level model turns out to be
intractable in general. In this paper, we give
performance bounds for both elastic and streaming
traffic by means of sample-path arguments. These bounds
present the practical interest of being insensitive to
traffic characteristics like the distributions of
elastic flow size and streaming flow duration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive streaming traffic; elastic traffic;
flow-level analysis; insensitive bounds; multi-service
network",
}
@Article{Deb:2004:RBV,
author = "Supratim Deb and R. Srikant",
title = "Rate-based versus queue-based models of congestion
control",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "246--257",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Mathematical models of congestion control capture the
congestion indication mechanism at the router in two
different ways: rate-based models, where the
queue-length at the router does not explicitly appear
in the model, and queue-based models, where the queue
length at the router is explicitly a part of the model.
Even though most congestion indication mechanisms use
the queue length to compute the packet marking or
dropping probability to indicate congestion, we argue
that, depending upon the choice of the parameters of
the AQM scheme, one would obtain a rate-based model or
a rate-and-queue-based model as the deterministic limit
of a stochastic system with a large number of users. We
also consider the impact of implementing AQM schemes in
the real queue or a virtual queue. If an AQM scheme is
implemented in a real queue, we show that, to ensure
that the queuing delays are negligible compared to
RTTs, one is forced to choose the parameters of a AQM
scheme in a manner which yields a rate-based
deterministic model. On the other hand, if the AQM
scheme is implemented in a virtual queue, small-queue
operation is achieved independent of the choice of the
parameters, thus showing a robustness property of
virtual queue-based schemes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AQM parameters; congestion control; virtual queue",
}
@Article{Chandrayana:2004:UCC,
author = "Kartikeya Chandrayana and Shivkumar Kalyanaraman",
title = "Uncooperative congestion control",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "258--269",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditionally uncooperative rate control schemes have
implied open loop protocols such as UDP, CBR, etc. In
this paper we show that closed loop uncooperative rate
control schemes also exist and that the current AQM
proposals cannot efficiently control their
mis-behavior. Moreover, these proposals require that
AQM be installed at all routers in the Internet which
is not only expensive but requires significant network
upgrade. In this paper we show that management of
uncooperative flows need not be coupled with AQM design
but can be viewed as edge based policing question. In
this paper we propose an analytical model for managing
uncooperative flows in the Internet by re-mapping their
utility function to a target range of utility
functions. This mapping can be achieved by
transparently manipulating congestion penalties
conveyed to the uncooperative users. The most
interesting aspect of this research is that this task
can be performed at the edge of the network with little
state information about uncooperative flows. The
proposed solution is independent of the buffer
management algorithm deployed on the network. As such
it works with Drop-Tail queues as well as any AQM
scheme. We have analyzed the framework and evaluated it
on various single and multi-bottleneck topologies with
both Drop-Tail and RED. Our results show that the
framework is robust and works well even in presence of
background traffic and reverse path congestion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; malicious behavior; optimization;
re-marking; selfish flows; uncooperative; utility
functions",
}
@Article{Applegate:2004:CNF,
author = "David Applegate and Lee Breslau and Edith Cohen",
title = "Coping with network failures: routing strategies for
optimal demand oblivious restoration",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "270--281",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Link and node failures in IP networks pose a challenge
for network control algorithms. Routing restoration,
which computes new routes that avoid failed links,
involves fundamental tradeoffs between efficient use of
network resources, complexity of the restoration
strategy and disruption to network traffic. In order to
achieve a balance between these goals, obtaining
routings that provide good performance guarantees under
failures is desirable. In this paper, building on
previous work that provided performance guarantees
under uncertain (and potentially unknown) traffic
demands, we develop algorithms for computing optimal
restoration paths and a methodology for evaluating the
performance guarantees of routing under failures. We
then study the performance of route restoration on a
diverse collection of ISP networks. Our evaluation uses
a competitive analysis type framework, where
performance of routing with restoration paths under
failures is compared to the best possible performance
on the failed network. We conclude that with careful
selection of restoration paths one can obtain
restoration strategies that retain nearly optimal
performance on the failed network while minimizing
disruptions to traffic flows that did not traverse the
failed parts of the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "demand-oblivious routing; restoration; routing",
}
@Article{Sevcik:2004:SSA,
author = "Kenneth C. Sevcik",
title = "Some systems, applications and models {I} have known",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "282--282",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005689",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Being named recipient of the 2004 ACM Sigmetrics
Achievement Award has done several things to me. It
brought me surprise that I would be singled out from
the many people who have made significant and sustained
contributions to the field of performance evaluation.
It also brought me deep appreciation for all the
students and colleagues with whom I have worked and
come to know as friends over the years. Finally, it has
caused me to ponder and reminisce about many of the
research projects and consulting studies in which I
have participated. In this talk, I will describe
various systems I have used and studied, various
applications of interest, and various models that I,
and others, have used to try to gain insights into the
performance of systems. Some lessons of possible future
relevance that emerge from this retrospective look at a
wide variety of projects are the following:
Exact Answers Are Overrated --- While exact solutions
of mathematical models are intellectually satisfying,
they are often not needed in practice.
Analytic Models Have a Role --- Analytic models can be
used to obtain quick and inexpensive answers to
performance questions in many situations where neither
simulation nor experimentation are
feasible.
Assumptions Matter --- Subtle changes to the
assumptions that underlie an analytic model can
substantially alter the conclusions reached based on
the model.
After considering all the methods of analysis,
simulation and experimentation, my recommendation for
the very best means to attain substantially improved
computer system performance is: Wait thirty years!",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tinnakornsrisuphap:2004:CQF,
author = "Peerapol Tinnakornsrisuphap and Richard J. La",
title = "Characterization of queue fluctuations in
probabilistic {AQM} mechanisms",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "283--294",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005721",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop a framework for studying the interaction of
a probabilistic active queue management (AQM) algorithm
with a generic end-user congestion-control mechanism.
We show that as the number of flows in the network
increases, the queue dynamics can be accurately
approximated by a simple deterministic process. In
addition, we investigate the sources of queue
fluctuations in this setup. We characterize two
distinct sources of queue fluctuations; one is the
deterministic oscillations which can be captured
through the aforementioned deterministic process. The
other source is the random fluctuations introduced by
the probabilistic nature of the marking schemes. We
discuss the relationship between these two types of
fluctuations and provide insights into how to control
them. Concrete examples in this framework are given for
several popular algorithms such as Random Early
Detection, Random Early Marking and Transmission
Control Protocol.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active queue management; central limit theorem; queue
fluctuations",
}
@Article{Vanichpun:2004:OCU,
author = "Sarut Vanichpun and Armand M. Makowski",
title = "The output of a cache under the independent reference
model: where did the locality of reference go?",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "295--306",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005722",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a cache operating under a demand-driven
replacement policy when document requests are modeled
according to the Independent Reference Model (IRM). We
characterize the popularity pmf of the stream of misses
from the cache, the so-called output of the cache, for
a large class of demand-driven cache replacement
policies. We measure strength of locality of reference
in a stream of requests through the skewness of its
popularity distribution. Using the notion of
majorization to capture this degree of skewness, we
show that for the policy $ A_0 $ and the random policy,
the output always has less locality of reference than
the input. However, we show by counterexamples that
this is not always the case under the LRU and CLIMB
policies when the input is selected according to a
Zipf-like pmf. In that case, conjectures are offered
(and supported by simulations) as to when LRU or CLIMB
caching indeed reduces locality of reference.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "locality of reference; majorization; output of a
cache; popularity",
}
@Article{Teixeira:2004:DHP,
author = "Renata Teixeira and Aman Shaikh and Tim Griffin and
Jennifer Rexford",
title = "Dynamics of hot-potato routing in {IP} networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "307--319",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005723",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite the architectural separation between
intradomain and interdomain routing in the Internet,
intradomain protocols do influence the path-selection
process in the Border Gateway Protocol (BGP). When
choosing between multiple equally-good BGP routes, a
router selects the one with the {\em closest\/} egress
point, based on the intradomain path cost. Under such
{\em hot-potato\/} routing, an intradomain event can
trigger BGP routing changes. To characterize the
influence of hot-potato routing, we conduct controlled
experiments with a commercial router. Then, we propose
a technique for associating BGP routing changes with
events visible in the intradomain protocol, and apply
our algorithm to AT\&T's backbone network. We show that
(i) hot-potato routing can be a significant source of
BGP updates, (ii) BGP updates can lag {\em 60\/}
seconds or more behind the intradomain event, (iii) the
number of BGP path changes triggered by hot-potato
routing has a nearly uniform distribution across
destination prefixes, and (iv) the fraction of BGP
messages triggered by intradomain changes varies
significantly across time and router locations. We show
that hot-potato routing changes lead to longer delays
in forwarding-plane convergence, shifts in the flow of
traffic to neighboring domains, extra
externally-visible BGP update messages, and
inaccuracies in Internet performance measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; convergence; hot-potato routing; OSPF",
}
@Article{Agarwal:2004:IBD,
author = "Sharad Agarwal and Chen-Nee Chuah and Supratik
Bhattacharyya and Christophe Diot",
title = "The impact of {BGP} dynamics on intra-domain traffic",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "319--330",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005724",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work in network traffic matrix estimation has
focused on generating router-to-router or PoP-to-PoP
(Point-of-Presence) traffic matrices within an ISP
backbone from network link load data. However, these
estimation techniques have not considered the impact of
inter-domain routing changes in BGP (Border Gateway
Protocol). BGP routing changes have the potential to
introduce significant errors in estimated traffic
matrices by causing traffic shifts between egress
routers or PoPs within a single backbone network. We
present a methodology to correlate BGP routing table
changes with packet traces in order to analyze how BGP
dynamics affect traffic fan-out within a large `tier-1'
network. Despite an average of 133 BGP routing updates
per minute, we find that BGP routing changes do not
cause more than 0.03\% of ingress traffic to shift
between egress PoPs. This limited impact is mostly due
to the relative stability of network prefixes that
receive the majority of traffic --- 0.05\% of BGP
routing table changes affect intra-domain routes for
prefixes that carry 80\% of the traffic. Thus our work
validates an important assumption underlying existing
techniques for traffic matrix estimation in large IP
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; traffic analysis; traffic engineering; traffic
matrix",
}
@Article{Feamster:2004:MBR,
author = "Nick Feamster and Jared Winick and Jennifer Rexford",
title = "A model of {BGP} routing for network engineering",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "331--342",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005726",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of IP networks depends on a wide
variety of dynamic conditions. Traffic shifts,
equipment failures, planned maintenance, and topology
changes in other parts of the Internet can all degrade
performance. To maintain good performance, network
operators must continually reconfigure the routing
protocols. Operators configure BGP to control how
traffic flows to neighboring Autonomous Systems (ASes),
as well as how traffic traverses their networks.
However, because BGP route selection is distributed,
indirectly controlled by configurable policies, and
influenced by complex interactions with intradomain
routing protocols, operators cannot predict how a
particular BGP configuration would behave in practice.
To avoid inadvertently degrading network performance,
operators need to evaluate the effects of configuration
changes {\em before deploying them on a live network}.
We propose an algorithm that computes the outcome of
the BGP route selection process for each router in a
{\em single\/} AS, given only a static snapshot of the
network state, without simulating the complex details
of BGP message passing. We describe a BGP emulator
based on this algorithm; the emulator exploits the
unique characteristics of routing data to reduce
computational overhead. Using data from a large ISP, we
show that the emulator correctly computes BGP routing
decisions and has a running time that is acceptable for
many tasks, such as traffic engineering and capacity
planning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; modeling; routing; traffic engineering",
}
@Article{Baccelli:2004:MFA,
author = "Fran{\c{c}}ois Baccelli and Augustin Chaintreau and
Danny De Vleeschauwer and David McDonald",
title = "A mean-field analysis of short lived interacting {TCP}
flows",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "343--354",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005727",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider a set of HTTP flows using
TCP over a common drop-tail link to download files.
After each download, a flow waits for a random think
time before requesting the download of another file,
whose size is also random. When a flow is active its
throughput is increasing with time according to the
additive increase rule, but if it suffers losses
created when the total transmission rate of the flows
exceeds the link rate, its transmission rate is
decreased. The throughput obtained by a flow, and the
consecutive time to download one file are then given as
the consequence of the interaction of all the flows
through their total transmission rate and the link's
behavior. We study the mean-field model obtained by
letting the number of flows go to infinity. This
mean-field limit may have two stable regimes: one
without congestion in the link, in which the density of
transmission rate can be explicitly described, the
other one with periodic congestion epochs, where the
inter-congestion time can be characterized as the
solution of a fixed point equation, that we compute
numerically, leading to a density of transmission rate
given as the solution of a Fredholm equation. It is
shown that for certain values of the parameters (more
precisely when the link capacity per user is not
significantly larger than the load per user), each of
these two stable regimes can be reached depending on
the initial condition. This phenomenon can be seen as
an analogue of turbulence in fluid dynamics: for some
initial conditions, the transfers progress in a fluid
and interaction-less way; for others, the connections
interact and slow down because of the resulting
fluctuations, which in turn perpetuates interaction
forever, in spite of the fact that the load per user is
less than the capacity per user. We prove that this
phenomenon is present in the Tahoe case and both the
numerical method that we develop and simulations
suggest that it is present in the Reno case too. It
translates into a bi-stability phenomenon for the
finite population model within this range of
parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "HTTP connections; mean-field model",
}
@Article{Hohn:2004:BRP,
author = "N. Hohn and D. Veitch and K. Papagiannaki and C.
Diot",
title = "Bridging router performance and queuing theory",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "355--366",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005728",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper provides an authoritative knowledge of
through-router packet delays and therefore a better
understanding of data network performance. Thanks to a
unique experimental setup, we capture {\em all\/}
packets crossing a router for 13 hours and present
detailed statistics of their delays. These measurements
allow us to build the following physical model for
router performance: each packet experiences a minimum
router processing time before entering a fluid output
queue. Although simple, this model reproduces the
router behaviour with excellent accuracy and avoids two
common pitfalls. First we show that in-router packet
processing time accounts for a significant portion of
the overall packet delay and should not be neglected.
Second we point out that one should fully understand
both link and physical layer characteristics to use the
appropriate bandwidth value. Focusing directly on
router performance, we provide insights into system
busy periods and show precisely how queues build up
inside a router. We explain why current practices for
inferring delays based on average utilization have
fundamental problems, and propose an alternative
solution to directly report router delay information
based on busy period statistics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet delay analysis; router model",
}
@Article{Bonald:2004:ILB,
author = "T. Bonald and M. Jonckheere and A. Prouti{\`e}re",
title = "Insensitive load balancing",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "367--377",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005729",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A large variety of communication systems, including
telephone and data networks, can be represented by
so-called Whittle networks. The stationary distribution
of these networks is insensitive, depending on the
service requirements at each node through their mean
only. These models are of considerable practical
interest as derived engineering rules are robust to the
evolution of traffic characteristics. In this paper we
relax the usual assumption of static routing and
address the issue of dynamic load balancing.
Specifically, we identify the class of load balancing
policies which preserve insensitivity and characterize
optimal strategies in some specific cases. Analytical
results are illustrated numerically on a number of toy
network examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "insensitivity; load balancing; whittle networks",
}
@Article{Bonald:2004:WDP,
author = "T. Bonald and S. Borst and N. Hegde and A.
Prouti{\'e}re",
title = "Wireless data performance in multi-cell scenarios",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "378--380",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005730",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of wireless data systems has been
extensively studied in the context of a single base
station. In the present paper we investigate the
flow-level performance in networks with multiple base
stations. We specifically examine the complex, dynamic
interaction of the number of active flows in the
various cells introduced by the strong impact of
interference between neighboring base stations. For the
downlink data transmissions that we consider, lower
service rates caused by increased interference from
neighboring base stations result in longer delays and
thus a higher number of active flows. This in turn
results in a longer duration of interference on
surrounding base stations, causing a strong correlation
between the activity states of the base stations. Such
a system can be modelled as a network of multi-class
processor-sharing queues, where the service rates for
the various classes at each queue vary over time as
governed by the activity state of the other queues. The
complex interaction between the various queues renders
an exact analysis intractable in general. A simplified
network with only one class per queue reduces to a
coupled-processors model, for which there are few
results, even in the case of two queues. We thus derive
bounds and approximations for key performance metrics
like the number of active flows, transfer delays, and
flow throughputs in the various cells. Importantly,
these bounds and approximations are insensitive,
yielding simple expressions, that render the detailed
statistical characteristics of the system largely
irrelevant.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "elastic traffic; fluid regime; insensitivity;
multi-class processor-sharing; quasi-stationary regime;
stability; time-varying service; wireless data
networks",
}
@Article{Kapoor:2004:CSA,
author = "Rohit Kapoor and Ling-Jyh Chen and Alok Nandan and
Mario Gerla and M. Y. Sanadidi",
title = "{CapProbe}: a simple and accurate capacity estimation
technique for wired and wireless environments",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "390--391",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005732",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of estimating the capacity of an Internet
path is one of fundamental importance. Due to the
multitude of potential applications, a large number of
solutions have been proposed and evaluated. The
proposed solutions so far have been successful in
partially addressing the problem, but have suffered
from being slow, obtrusive or inaccurate. In this work,
we evaluate CapProbe, a low-cost and accurate
end-to-end capacity estimation scheme that relies on
packet dispersion techniques as well as end-to-end
delays. The key observation that enabled the
development of CapProbe is that both compression and
expansion of packet pair dispersion are the result of
queuing due to cross-traffic. By filtering out queuing
effects from packet pair samples, CapProbe is able to
estimate capacity accurately in most environments, with
minimal processing and probing traffic overhead. In
fact, the storage and processing requirements of
CapProbe are orders of magnitude smaller than most of
the previously proposed schemes. We tested CapProbe
through simulation, Internet, Internet2 and wireless
experiments. We found that CapProbe error percentage in
capacity estimation was within 10\% in almost all
cases, and within 5\% in most cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity estimation; delay; dispersion; packet pair",
}
@Article{Sommers:2004:HFL,
author = "Joel Sommers and Hyungsuk Kim and Paul Barford",
title = "{Harpoon}: a flow-level traffic generator for router
and network tests",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "392--392",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005733",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe Harpoon, a new application-independent
tool for generating representative packet traffic at
the {\em IP flow level}. Harpoon is a configurable tool
for creating TCP and UDP packet flows that have the
same byte, packet, temporal, and spatial
characteristics as measured at routers in live
environments. We validate Harpoon using traces
collected from a live router and then demonstrate its
capabilities in a series of router performance
benchmark tests.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network flows; traffic generation",
}
@Article{Ribeiro:2004:STA,
author = "Vinay J. Ribeiro and Rudolf H. Riedi and Richard G.
Baraniuk",
title = "Spatio-temporal available bandwidth estimation with
{STAB}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "394--395",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005734",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of locating in space and over
time a network path's {\em tight\/} link, that is the
link with the least available bandwidth on the path.
Tight link localization benefits network-aware
applications, provides insight into the causes of
network congestion and ways to circumvent it, and aids
network operations. We present {\em STAB}, a
light-weight probing tool to locate tight links. STAB
combines the probing concepts of self-induced
congestion, tailgating, and packet chirps in a novel
fashion. We demonstrate its capabilities through
experiments on the Internet and verify our results
using router MRTG data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "available bandwidth; bandwidth; bottleneck; chirps;
estimation; probing; tailgating; tight link",
}
@Article{Rajendran:2004:OQS,
author = "Raj Kumar Rajendran and Dan Rubenstein",
title = "Optimizing the quality of scalable video streams on
{P2P} networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "396--397",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005735",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "P2P; quality; scheduling; streaming; video",
}
@Article{Wang:2004:PAT,
author = "Helen J. Wang and John Platt and Yu Chen and Ruyun
Zhang and Yi-Min Wang",
title = "{PeerPressure} for automatic troubleshooting",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "398--399",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005736",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "automatic troubleshooting; Bayesian estimates; golden
state; PeerPressure; statistics; system management",
}
@Article{Hahner:2004:QAP,
author = "J{\"o}rg H{\"a}hner and Dominique Dudkowski and Pedro
Jos{\'e} Marr{\'o}n and Kurt Rothermel",
title = "A quantitative analysis of partitioning in mobile ad
hoc networks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "400--401",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005737",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mobile ad hoc networks; network topology; partition
metrics",
}
@Article{Zhang:2004:LTL,
author = "Dalu Zhang and Weili Huang and Chen Lin",
title = "Locating the tightest link of a network path",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "402--403",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005738",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The tightest link of a network path is the link where
the end-to-end available bandwidth is limited. We
propose a new probe technique, called Dual Rate
Periodic Streams (DRPS), for finding the location of
the tightest link. A DRPS probe is a periodic stream
with two rates. Initially, it goes through the path at
a comparatively high rate. When arrived at a particular
link, the probe shifts its rate to a lower level and
keeps the rate. If proper rates are set to the probe,
we can control whether the probe is congested or not by
adjusting the shift time. When the point of rate shift
is in front of the tightest link, the probe can go
through the path without congestion, otherwise
congestion occurs. Thus, we can find the location of
the tightest link by congestion detection at the
receiver.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "available bandwidth; dual rate periodic streams
(DRPS); network measurements; tight link",
}
@Article{Sullivan:2004:UPR,
author = "David G. Sullivan and Margo I. Seltzer and Avi
Pfeffer",
title = "Using probabilistic reasoning to automate software
tuning",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "404--405",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005739",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Manually tuning the parameters or `knobs' of a complex
software system is an extremely difficult task.
Ideally, the process of software tuning should be
automated, allowing software systems to reconfigure
themselves as needed in response to changing
conditions. We present a methodology that uses a
probabilistic, graphical model known as an influence
diagram as the foundation of an effective, automated
approach to software tuning. We have used our
methodology to simultaneously tune four knobs from the
Berkeley DB embedded database system, and our results
show that an influence diagram can effectively
generalize from training data for this domain.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "influence diagrams; probabilistic reasoning;
self-tuning systems",
}
@Article{Wang:2004:MST,
author = "Bing Wang and Jim Kurose and Prashant Shenoy and Don
Towsley",
title = "Multimedia streaming via {TCP}: an analytic
performance study",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "406--407",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005740",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multimedia streaming; performance modeling",
}
@Article{Wynter:2004:PIQ,
author = "Laura Wynter and Cathy H. Xia and Fan Zhang",
title = "Parameter inference of queueing models for {IT}
systems using end-to-end measurements",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "408--409",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005741",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "end-to-end measurements; inference; queueing models",
}
@Article{Pfaff:2004:PAB,
author = "Ben Pfaff",
title = "Performance analysis of {BSTs} in system software",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "410--411",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005742",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AVL tree; binary search tree; BST; red-black tree;
splay tree; threaded tree",
}
@Article{Wang:2004:SDP,
author = "Mengzhi Wang and Kinman Au and Anastassia Ailamaki and
Anthony Brockwell and Christos Faloutsos and Gregory R.
Ganger",
title = "Storage device performance prediction with {CART}
models",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "412--413",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005743",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work explores the application of a machine
learning tool, CART modeling, to storage devices. We
have developed approaches to predict a device's
performance as a function of input workloads, requiring
no knowledge of the device internals. Two uses of CART
models are considered: one that predicts per-request
response times (and then derives aggregate values) and
one that predicts aggregate values directly from
workload characteristics. After training on the device
in question, both provide reasonably-accurate black box
models across a range of test traces from real
environments. An expanded version of this paper is
available as a technical report [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance prediction; storage device modeling",
}
@Article{Kamra:2004:CPT,
author = "Abhinav Kamra and Vishal Misra and Erich Nahum",
title = "Controlling the performance of 3-tiered {Web} sites:
modeling, design and implementation",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "414--415",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005744",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; control theory; e-commerce; TPC-W",
}
@Article{Roughan:2004:CRT,
author = "Matthew Roughan and Tim Griffin and Morley Mao and
Albert Greenberg and Brian Freeman",
title = "Combining routing and traffic data for detection of
{IP} forwarding anomalies",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "416--417",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005745",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "IP forwarding anomalies, triggered by equipment
failures, implementation bugs, or configuration errors,
can significantly disrupt and degrade network service.
Robust and reliable detection of such anomalies is
essential to rapid problem diagnosis, problem
mitigation, and repair. We propose a simple, robust
method that integrates routing and traffic data streams
to reliably detect forwarding anomalies. The overall
method is scalable, automated and self-training. We
find this technique effectively identifies forwarding
anomalies, while avoiding the high false alarms rate
that would otherwise result if either stream were used
unilaterally.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; network anomaly detection; routing; SNMP;
traffic",
}
@Article{Tao:2004:EPB,
author = "Shu Tao and Kuai Xu and Ying Xu and Teng Fei and Lixin
Gao and Roch Guerin and Jim Kurose and Don Towsley and
Zhi-Li Zhang",
title = "Exploring the performance benefits of end-to-end path
switching",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "418--419",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005746",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multi-homing; overlay; path switching",
}
@Article{Kaplan:2004:CFR,
author = "Scott F. Kaplan",
title = "Complete or fast reference trace collection for
simulating multiprogrammed workloads: choose one",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "420--421",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005747",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "reference trace collection; trace-driven simulation",
}
@Article{Raghunath:2004:QTO,
author = "Satish Raghunath and Shivkumar Kalyanaraman and K. K.
Ramakrishnan",
title = "Quantifying trade-offs in resource allocation for
{VPNs}",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "422--423",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005748",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Virtual Private Networks (VPNs) feature notable
characteristics in structure and traffic patterns that
allow for efficient resource allocation. A strategy
that exploits the underlying characteristics of a VPN
can result in significant capacity savings to the
service provider. There are a number of admission
control and bandwidth provisioning strategies to choose
from. We examine trade-offs in design choices in the
context of distinctive characteristics of VPNs. We
examine the value of signaling-based mechanisms,
traffic matrix information and structural
characteristics of VPNs in the way they impact resource
utilization and service quality. We arrive at important
conclusions which could have an impact on the way VPNs
are architected. We show that the structure of VPNs
profoundly influences achievable resource utilization
gains with various admission control and provisioning
schemes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "hose model; point-to-multipoint; point-to-set; virtual
private networks",
}
@Article{Ruan:2004:ONS,
author = "Yaoping Ruan and Vivek S. Pai",
title = "The origins of network server latency \& the myth of
connection scheduling",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "424--425",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005749",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate the origins of server-induced latency
to understand how to improve latency optimization
techniques. Using the Flash Web server [4], we analyze
latency behavior under various loads. Despite latency
profiles that suggest standard queuing delays, we find
that most latency actually originates from negative
interactions between the application and the locking
and blocking mechanisms in the kernel. Modifying the
server and kernel to avoid these problems yields both
qualitative and quantitative changes in the latency
profiles --- latency drops by more than an order of
magnitude, and the effective service discipline also
improves. We find our modifications also mitigate
service burstiness in the application, reducing the
event queue lengths dramatically and eliminating any
benefit from application-level connection scheduling.
We identify one remaining source of unfairness, related
to competition in the networking stack. We show that
adjusting the TCP congestion window size addresses this
problem, reducing latency by an additional factor of
three.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "connection scheduling; latency; network server",
}
@Article{Anagnostakis:2004:HDI,
author = "K. G. Anagnostakis and M. B. Greenwald",
title = "A hybrid direct-indirect estimator of network internal
delays",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "426--427",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005750",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay; ICMP timestamp; network tomography",
}
@Article{Carlsson:2004:MPS,
author = "Niklas Carlsson and Derek L. Eager and Mary K.
Vernon",
title = "Multicast protocols for scalable on-demand download",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "428--429",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005751",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "batching; cyclic multicast; scalable download
protocols",
}
@Article{Pai:2004:IPI,
author = "Vijay S. Pai and Scott Rixner and Hyong-youb Kim",
title = "Isolating the performance impacts of network interface
cards through microbenchmarks",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "430--431",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1005686.1005752",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network server performance; networking
microbenchmarks",
}
@Article{Chu:2004:ECU,
author = "Jacky Chu and Kevin Labonte and Brian Neil Levine",
title = "An evaluation of {Chord} using traces of peer-to-peer
file sharing",
journal = j-SIGMETRICS,
volume = "32",
number = "1",
pages = "432--433",
month = jun,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1012888.1005753",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:18 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2004:GEF,
author = "Mark S. Squillante",
title = "{Guest Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "2--2",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035336",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Osogami:2004:RAT,
author = "Takayuki Osogami and Adam Wierman and Mor
Harchol-Balter and Alan Scheller-Wolf",
title = "A recursive analysis technique for multi-dimensionally
infinite {Markov} chains",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "3--5",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035337",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance analysis of multiserver systems with
multiple classes of jobs often has a common source of
difficulty: the state space needed to capture the
system behavior grows infinitely in multiple
dimensions. For example, consider two processors, each
serving its own M/M/1 queue, where one of the
processors (the `donor') can help the other processor
(the `beneficiary') with its jobs, during times when
the donor processor is idle [5, 16] or when some
threshold conditions are met [14, 15]. Since the
behavior of beneficiary jobs depends on the number of
donor jobs in system, performance analysis of
beneficiary jobs involves a two dimensionally infinite
(2D-infinite) state space, where one dimension
corresponds to the number of beneficiary jobs and the
other dimension corresponds to the number of donor
jobs. Another example is an M/M/2 queue with two
priority classes, where high priority jobs have
preemptive priority over low priority jobs (see for
example [1, 3, 4, 8, 10, 11, 12, 17] and references
therein). Since the behavior of low priority jobs
depends on the number of high priority jobs in system,
performance analysis of low priority jobs involves
2D-infinite state space, where each dimension
corresponds to the number of each class of jobs in
system. As we will see, when there are m priority
classes, performance analysis of the lowest priority
classes involves m dimensionally infinite state
space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{daSilva:2004:EAT,
author = "Ana Paula Couto da Silva and Rosa M. M. Le{\"a}o and
Edmundo {de Souza e Silva}",
title = "An efficient approximate technique for solving fluid
models",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "6--8",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035338",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic fluid-flow models have been widely used as
an important tool for the analysis of a variety of
computer and communication models. In particular, when
the event rates of the system under investigation vary
in orders of magnitude, the use of fluid models results
in considerable computational savings when compared to
traditional models where all events are explicitly
represented. This is true for instance, in the so
called performability models [10], where events that
represent structural changes in the system (e.g.,
failure and repair events) occur at much lower rates
than those associated with some performance measure,
such as the arrival and service of jobs. As another
example, consider a queueing model of a communication
network channel. The intervals between events
associated with packet arrival and departure from a
buffer may be orders of magnitude smaller than the
intervals that represent changes in the arrival rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kogan:2004:TPI,
author = "Yaakov Kogan and Gagan Choudhury",
title = "Two problems in {Internet} reliability: new questions
for old models",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "9--11",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035339",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper is motivated by two problems related to
Internet reliability, where transient rather than
traditional steady-state analysis is required. First, a
failure and repair model for a router with active and
redundant processors is considered. It is proved that
the number of failed routers during a given interval of
time is asymptotically Poisson when the total number of
routers is large and the parameter of the Poisson
process is explicitly calculated. The second problem is
related to reliability of a nationwide IP backbone. A
situation, where operational links do not have enough
spare capacity to carry additional traffic during the
outage time, is referred to as bandwidth loss. We
consider only one unidirectional backbone link and
derive asymptotic approximations for the expected
bandwidth loss in the framework of generalized Erlang
and Engset models when the total number of resource
units and request arrival rates are proportionally
large.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2004:FSS,
author = "Adam Wierman and Mor Harchol-Balter",
title = "Formalizing {SMART} scheduling",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "12--13",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035340",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is well-known that policies which bias towards
small job sizes or jobs with small remaining service
times perform well with respect to mean response time
and mean slowdown. This idea has been fundamental in
many system implementations including the case of Web
servers, where it has been shown that by giving
priority to requests for small files, a Web server can
significantly reduce mean response time and mean
slowdown [1]. The heuristic has also been applied to
other application areas; for example, scheduling in
supercomputing centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raz:2004:HFQ,
author = "David Raz and Benjamin Avi-Itzhak and Hanoch Levy",
title = "How fair is queue prioritization?",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "14--16",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035341",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Customer classification and prioritization are
commonly used in many applications to provide queue
preferential service. Their influence on queuing
systems has been thoroughly studied from the delay
distribution perspective. However, the fairness
aspects, which are inherent to any preferential system
and highly important to customers, have hardly been
studied and not been quantified to date. In this work
we use the Resource Allocation Queueing Fairness
Measure (RAQFM) to analyze such systems and derive
their relative fairness values. We also analyze the
effect multiple servers have on fairness, showing that
multiple servers increase the fairness of the
system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feng:2004:RBC,
author = "Hanhua Feng and Vishal Misra",
title = "On the relationship between coefficient of variation
and the performance of {M/G/1-FB} queues",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "17--19",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035342",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we analyze how the coefficient of
variation of the service time distribution affects the
mean sojourn time of M/G/1-FB queues. The results show
that the coefficient of variation is a necessary but
not sufficient measure to characterize heavy-tailed
distributions in term of the performance under the FB
policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2004:DSM,
author = "Junxia Chang and Hayriye Ayhan and Jim Dai",
title = "Dynamic scheduling of multiclass open queueing
networks in a slowly changing environment",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "20--21",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035343",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The popularity and importance of Web have increased
dramatically in the past few years as well as the
complexity of Web server systems. Workload
characterization studies reveal that there exist strong
time-of-day effects in the Web traffic. Many Web sites
have sustained and higher hit rates during certain time
periods of a day than other time periods. During the
peak hours, the Web servers may even be overloaded.
Simple stochastic processes with a fixed rate fail to
capture this time varying characteristic of the Web
systems. Therefore, we herein consider that the Web
system is operating in a changing environment. Whenever
the environment changes state, the arrival rates of
user requests change as well as the service rates and
the routing decisions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2004:KPP,
author = "Vladimir Marbukh",
title = "A knowledge plane as a pricing mechanism for
aggregate, user-centric utility maximization",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "22--24",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035344",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes pricing user centric requirements
as a potential role for the Knowledge Plane. Assuming
elastic users capable of modifying their behavior in
response to the pricing signals, this approach may
result in optimal resource allocation without necessity
for the users to acquire detailed information on the
network state as well as advanced knowledge of the user
requirements by the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "elastic users; network; performance; pricing;
utility",
}
@Article{Lin:2004:CMM,
author = "Wuqin Lin and Zhen Liu and Cathy H. Xia and Li Zhang",
title = "Cost minimization of multi-tiered e-business
infrastructure with end-to-end delay guarantees",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "25--27",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035345",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "E-Business has become a cost effective solution for
many traditional businesses and a critical component of
many companies to such a degree that guaranteeing the
performance and availability is vital. The design and
development of e-business infrastructure should meet a
twofold challenge. On one hand, it must meet customer
expectations in terms of quality of service (QoS). On
the other hand, companies have to control IT costs to
stay competitive. It is therefore crucial to understand
the tradeoff between costs and service levels so as to
enable the determination of the most cost-effective
architecture and system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adler:2004:TOP,
author = "Micah Adler and Rakesh Kumar and Keith Ross and Dan
Rubenstein and David Turner and David D. Yao",
title = "Two optimal peer selection problems",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "28--30",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035346",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many peer computers today participate in peer-to-peer
file sharing applications in which the computers
contribute storage and bandwidth resources. Of course,
applications can only harness the resource pool if
peers make available their surplus resources to them.
It is widely documented, however, that the P2P systems
are havens for `free riders': a significant fraction of
users do not contribute any resources, and a minute
fraction of users contribute the majority of the
resources. Clearly, to improve the performance of
existing P2P file sharing systems, and to enable new
classes of P2P applications, a compelling incentive
system needs to be put in place.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2004:CDS,
author = "E. G. Coffman and Andreas Constantinides and Dan
Rubenstein and Bruce Shepherd and Angelos Stavrou",
title = "Content distribution for seamless transmission",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "31--32",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035347",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new paradigm in information
transmission, the concept of SEAMLESS TRANSMISSION,
whereby any client in a network requesting a file
starts receiving it immediately, and experiences no
delays throughout the remainder of the downloading
time. This notion is based on the partial caching
concept [2] which was introduced to overcome some of
the disadvantages of traditional cache replacement
algorithms such as LRU and LRU-threshold [1]. The main
idea of partial caching is to store an initial part of
the file in the cache and to obtain the rest of the
file from the origin server. To achieve the maximal
retrieval performance of seamless transmission, clients
must be prepared to re-sequence segments of the files
received out of order. With this caveat, seamless
transmission can be viewed as a way to implement strict
quality of service (QoS) guarantees to all clients of a
network. This paper gives a provably correct technique
for achieving seamlessness for a given file located at
the root node in a tree structured network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gamarnik:2004:AOT,
author = "David Gamarnik and Petar Mom{\v{c}}ilovi{\'c}",
title = "An asymptotic optimality of the transposition rule for
linear lists",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "33--34",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035348",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract =     "The linear list is one of the basic data structures in
computer science with search being a primary operation
defined on it. Items are located in the list by
sequentially examining them from the beginning of the
list. Intuitively one would like to place items that
are frequently requested at the front of the list in
order to minimize the number of items being examined.
Given the properties of the request sequence one could
place items in an order that minimizes the search cost.
Yet often properties of the request sequence are either
not known in advance or time dependent. Hence, it is
desirable to employ self-organizing algorithms. The two
best known such rules are the move-to-front and
transposition rule [9, Section 6]. In addition to being
simple these rules are memory-free, i.e., require no
memory for their operation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "average-case analysis; exclusion process;
self-organizing list",
}
@Article{Baryshnikov:2004:SAT,
author = "Yuliy Baryshnikov and Ed Coffman and Petar
Mom{\v{c}}ilovi{\'c}",
title = "Self assembly times in {DNA}-based computation",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "35--37",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035349",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Speed of computation and power consumption are the two
main parameters of conventional computing devices
implemented in microelectronic circuits. As performance
of such devices approaches physical limits, new
computing paradigms are emerging. Two paradigms
receiving great attention are quantum and DNA-based
molecular computing.\par
This paper focuses on DNA-based computing. This concept
can be abstracted to growth models where computational
elements called tiles are self-assembled one by one,
subject to some simple hierarchical rules, to fill a
given template encoding a Boolean formula. While
DNA-based computational devices are known to be
extremely energy efficient, little is known concerning
the fundamental question of computation times. In
particular, given a function, we study the time
required to determine its value for a given input. In
the simplest instance, the analysis has interesting
connections with interacting particle systems and
variational problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Saniee:2004:PDS,
author = "Iraj Saniee and Indra Widjaja and John Morrison",
title = "Performance of a distributed scheduling protocol for
{TWIN}",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "38--40",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035350",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses a scheduling mechanism for a new
network architecture (TWIN) that provides arbitrary
capacity up to a wavelength to any source-destination
pair as needed, without optical-to-electronic
conversion. The network emulates ultra-fast switching
in the passive network core through the use of
ultra-fast wavelength tunable lasers at the network
edge. This architecture is suitable for any end-to-end
traffic load, from static or quasi-static load (Sonet),
to highly dynamic (IP) load. The key enabler of this
architecture is a scheduling mechanism that schedules
transmissions for maximal throughput. We propose a
distributed scheduling scheme that is randomized for
highly dynamic load and can learn to adjust for
quasi-static load. We derive analytical formulae for
the performance of the proposed scheme when load is
highly dynamic, show that it outperforms standard
protocols (such as aloha) and illustrate the effect of
learning for quasi-static load through simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bekker:2004:ITF,
author = "Ren{\'e} Bekker and Sem Borst and Rudesindo
N{\'u}{\~n}ez-Queija",
title = "Integration of {TCP}-friendly streaming sessions and
heavy-tailed elastic flows",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "41--43",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035351",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a fixed number of streaming sessions
sharing a bottleneck link with a dynamic population of
elastic flows. We assume that the sizes of the elastic
flows exhibit heavy-tailed characteristics. The elastic
flows are TCP-controlled, while the transmission rates
of the streaming applications are governed by a
so-called TCP-friendly rate control
protocol.\par
Adopting the Processor-Sharing (PS) discipline to model
the bandwidth sharing, we investigate the tail
distribution of the deficit in service received by the
streaming sessions compared to a nominal service
target. The latter metric provides an indication for
the quality experienced by the streaming applications.
The results yield valuable qualitative insight into the
occurrence of persistent quality disruption for the
streaming users. We also examine the delay performance
of the elastic flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vanKessel:2004:ARA,
author = "Gijs van Kessel and Rudesindo N{\'u}{\~n}ez-Queija and
Sem Borst",
title = "Asymptotic regimes and approximations for
discriminatory processor sharing",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "44--46",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the joint queue length distribution of the
Discriminatory Processor Sharing model, assuming all
classes have phase-type service requirement
distributions. We show that the moments of the joint
queue length distribution can be obtained by solving
linear equations. We use this to study the system in
two asymptotic regimes. In the first regime, the
different user classes operate on strictly separated
time scales. Then we study the system in heavy
traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cui:2004:ODM,
author = "Yi Cui and Yuan Xue and Klara Nahrstedt",
title = "Optimal distributed multicast routing using network
coding: theory and applications",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "47--49",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035353",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Optimal data routing in a network can be often
understood as a multicommodity flow problem. Given a
network and a set of commodities, i.e., a set of
source-destination pairs, one tries to achieve certain
optimization goal, such as minimum delay, maximum
throughput, while maintaining certain fairness among
all commodities. The constraints of such optimization
problems are usually network link capacity and traffic
demand of each commodity. Multicommodity flow problem
has been well studied as a typical linear programming
problem. Its distributed solutions have also been
proposed[2].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2004:CPS,
author = "Xuan Li and David D. Yao",
title = "Control and pricing in stochastic networks with
concurrent resource occupancy",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "50--52",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035354",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Concurrent resource occupancy pervades most
engineering and service systems. For example, a
multi-leg plane trip requires seat reservation on
several connecting flights; a configure-to-order
product demands the simultaneous processing of all its
components; a file transfer on the Internet needs
band-width on all the links along its route from source
to destination. The object of our study is a network
with stochastic concurrent occupancy of resources. The
network can be physical (e.g., a telecommunication
network), or virtual (e.g., the Worldwide Web), or
relational (e.g., the bill of materials of a product,
representing its configuration of all components); and
both the demand/order arrivals and their processing
times required of the resources are stochastic. Our
goal is to do revenue optimization in the network
through two decisions: (a) pricing: to determine the
price charged to each job class and its dynamic
adjustment over time; and (b) resource control: to
regulate the distribution of resources among the job
classes, in particular, when to accept/reject a job and
from which class.\par
Below, we highlight a new fixed-point approximation for
a network operating under a set of thresholds that
control the access of jobs from each class. With this
fixed-point approximation, the resource control problem
takes the form of setting the optimal thresholds, which
can be formulated and solved as a linear program. To
determine the optimal prices then amounts to solving
another set of optimality equations on top of the
linear program. Furthermore, we can show that our
approach via solving optimization problems based on the
fixed-point approximation is optimal in some asymptotic
sense.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Guo:2004:OPR,
author = "Xin Guo and Yingdong Lu and Mark S. Squillante",
title = "Optimal probabilistic routing in distributed parallel
queues",
journal = j-SIGMETRICS,
volume = "32",
number = "2",
pages = "53--54",
month = sep,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1035334.1035355",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:23 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the fundamental problem of routing
customers among distributed parallel queues to minimize
an objective function based on equilibrium sojourn
times under general assumptions for the arrival and
service processes and under the assumption that
customers are routed to the parallel queues in a
probabilistic manner. More specifically, we derive
explicit solutions for the asymptotically optimal
vector of probabilities that control the routing of
customers upon arrival among a set of heterogeneous
general single-server queues through stochastic-process
limits. Our assumption of probabilistic routing is
consistent with previous theoretical studies of this
optimization problem, and our solutions can be used for
the parameter settings of other routing mechanisms
found in practice. Stochastic-process limits are
exploited in order to be able to handle general arrival
and service processes and obtain explicit solutions to
the scheduling optimization problems of interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neto:2004:CBU,
author = "Humberto T. Marques Neto and Jussara M. Almeida and
Leonardo C. D. Rocha and Wagner Meira and Pedro H. C.
Guerra and Virgilio A. F. Almeida",
title = "A characterization of broadband user behavior and
their e-business activities",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "3--13",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a characterization of broadband
user behavior from an Internet Service Provider
standpoint. Users are broken into two major categories:
residential and Small-Office/Home-Office (SOHO). For
each user category, the characterization is performed
along four criteria: (i) session arrival process, (ii)
session duration, (iii) number of bytes transferred
within a session and (iv) user request patterns. Our
results show that both residential and SOHO session
inter-arrival times are exponentially distributed.
Whereas residential session arrival rates remain
relatively high during the day, SOHO session arrival
rates vary much more significantly during the day. On
the other hand, a typical SOHO user session is longer
and transfers a larger volume of data. Furthermore, our
analysis uncovers two main groups of session request
patterns within each user category. The first group
consists of user sessions that use traditional Internet
services, such as e-mail, instant messenger and,
mostly, www services. On the other hand, sessions from
the second group, a smaller group, use typically
peer-to-peer file sharing applications, remain active
for longer periods and transfer a large amount of data.
Looking further into the e-business services most
commonly accessed, we found that subscription-based and
advertising services account for the vast majority of
user HTTP requests in both residential and SOHO
workloads. Understanding these user behavior patterns
is important to the development of more efficient
applications for broadband users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andreolini:2004:FGP,
author = "Mauro Andreolini and Michele Colajanni and Riccardo
Lancellotti and Francesca Mazzoni",
title = "Fine grain performance evaluation of e-commerce
sites",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "14--23",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "E-commerce sites are still a reference for the Web
technology in terms of complexity and performance
requirements, including availability and scalability.
In this paper we show that a coarse grain analysis,
that is used in most performance studies, may lead to
incomplete or false deductions about the behavior of
the hardware and software components supporting
e-commerce sites. Through a fine grain performance
evaluation of a medium size e-commerce site, we find
some interesting results that demonstrate the
importance of an analysis approach that is carried out
at the software function level with the combination of
distribution oriented metrics instead of average
values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sopitkamol:2004:RCP,
author = "Monchai Sopitkamol",
title = "Ranking configuration parameters in multi-tiered
e-commerce sites",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "24--33",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "E-commerce systems are composed of many components
with several configurable parameters that, if properly
configured, can optimize system performance. Before
upgrading existing systems to overcome performance
bottlenecks, several areas of a site's architecture and
its parameters may be adjusted to improve performance.
This paper provides a method to rank key configurable
e-commerce system parameters that significantly impact
overall system performance, and the performance of the
most significant Web function types. We consider both
on-line and off-line parameters at each of the
e-commerce system layers: Web server, application
server, and database server. In order to accomplish our
task, we designed a practical, ad-hoc approach that
involves conducting experiments on a testbed system
setup as a small e-commerce site. The configurable
parameters are ranked based on their degrees of
performance improvement to the system and to the most
critical Web functions. The performance metrics of
interest include server's response time, system
throughput, and probability of rejecting a customer's
request. The experiments were conducted on an
e-commerce site compliant to the TPC-W benchmark.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DAntonio:2004:ASC,
author = "S. D'Antonio and M. Esposito and S. P. Romano and G.
Ventre",
title = "Assessing the scalability of component-based
frameworks: the {CADENUS} case study",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "34--43",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052311",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes an approach to scalability
analysis of component-based systems. A theoretical
model of the orchestrated behavior of a system's
components is developed and potential bottlenecks are
identified. The model is derived by performing an
analysis of the average number of messages that each
involved entity has to deal with, i.e. receive,
elaborate and possibly forward. By appropriately
setting the various model parameters, it is possible to
evaluate a system's behavior in a number of different
scenarios. The model itself is based upon a queuing
network paradigm, whereby each component is associated
with a `service centre' characterized by specific
values of both the message arrival rate and the service
time: based on such values, the utilization coefficient
of the service centers is computed and the potential
bottlenecks are identified. The queuing network model
is also exploited to evaluate the performance of the
overall system under various configurations. The
proposed approach is introduced and developed by taking
the CADENUS system as a running example. CADENUS is a
component-based framework designed and developed within
a recent IST project, whose main goal resides in the
provisioning of Premium IP services by means of an
effective application of the so-called {\em mediation
paradigm.\/}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mediation; network protocols; probability theory;
queuing networks; scalability",
}
@Article{Ye:2004:RRS,
author = "Tao Ye and Shivkumar Kalyanaraman",
title = "A recursive random search algorithm for network
parameter optimization",
journal = j-SIGMETRICS,
volume = "32",
number = "3",
pages = "44--53",
month = dec,
year = "2004",
CODEN = "????",
DOI = "https://doi.org/10.1145/1052305.1052306",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:25 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes a new heuristic search algorithm,
Recursive Random Search(RRS), for black-box
optimization problems. Specifically, this algorithm is
designed for the dynamical parameter optimization of
network protocols which emphasizes on obtaining good
solutions within a limited time frame rather than full
optimization. The RRS algorithm is based on the initial
high-efficiency property of random sampling and
attempts to maintain this high-efficiency by constantly
`restarting' random sampling with adjusted sample
spaces. Due to its basis on random sampling, the RRS
algorithm is robust to the effect of random noises in
the objective function and it performs especially
efficiently when handling the objective functions with
negligible parameters. These properties have been
demonstrated with the tests on a suite of benchmark
functions. The RRS algorithm has been successfully
applied to the optimal configuration of several network
protocols. One application to a network routing
algorithm is presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haverkort:2005:PV,
author = "Boudewijn R. Haverkort and Joost-Pieter Katoen",
title = "Performance and verification",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "3--3",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059817",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Some twenty five years ago, the field of
computer-communication system performance evaluation
and the field of formal specification and verification
were regarded as completely disjunct. The former field
focussed on the quantitative aspects of system
behaviour, expressed in measures such as delays,
throughputs and loss probabilities, whereas the latter
field focussed on the qualitative aspects of system
behaviour, expressed in measures (or, properties) such
as system liveness, deadlock freeness and safety. Over
the years, however, this distinction has shown to be
not always useful. In fact, we see a large variety of
systems for which the qualitative behaviour cannot be
decoupled from the quantitative aspect. Think for
instance of communication protocols in an embedded
system setting: the qualitative correctness of a
protocol, without considering (absolute) timing
aspects, is not enough for classifying a protocol as
correct. Indeed, only when the protocol behaves as it
should, and does so {\em in a timely manner,\/} the
protocol can be regarded as correct. Observations of
this kind have lead to a variety of integrated
approaches toward performance evaluation and
verification.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:2005:IDS,
author = "Gianfranco Ciardo and Andrew S. Miner",
title = "Implicit data structures for logic and stochastic
systems analysis",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "4--9",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059818",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Both logic and stochastic analysis have strong
theoretical underpinnings, but they have been
traditionally relegated to separate areas of computer
science, the former focusing on logic and discrete
algorithms, the latter on exact or approximate
numerical methods. In the last few years, though, there
has been a convergence of research in these two areas,
due to the realization that data structures used in one
area can benefit the other and that, by merging the
goals of the two areas, a more integrated approach to
system analysis can be derived. In this paper, we
describe some of the beneficial interactions between
the two, and some of the research challenges ahead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baier:2005:MCM,
author = "Christel Baier and Boudewijn R. Haverkort and Holger
Hermanns and Joost-Pieter Katoen",
title = "Model checking meets performance evaluation",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "10--15",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059819",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov chains are one of the most popular models for
the evaluation of performance and dependability of
information processing systems. To obtain performance
measures, typically long-run or transient state
probabilities of Markov chains are determined.
Sometimes the Markov chain at hand is equipped with
rewards and computations involve determining long-run
or instantaneous reward probabilities. This note
summarises a technique to determine performance and
dependability {\em guarantees\/} of Markov chains.
Given a precise description of the desired guarantee,
all states in the Markov chain are determined that
surely meet the guarantee. This is done in a fully
automated way. Guarantees are described using logics.
The use of logics yields an expressive framework that
allows to express well-known measures, but also (new)
intricate and complex performance guarantees. The power
of this technique is that no matter how complex the
logical guarantee, it is {\em automatically\/} checked
which states in the Markov chain satisfy it. Neither
manual manipulations of Markov chains (or their
high-level descriptions) are needed, nor the knowledge
of any numerical technique to analyze them efficiently.
This applies to any (time-homogeneous) Markov chain of
any structure specified in any high-level formalism.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kwiatkowska:2005:PMC,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "Probabilistic model checking in practice: case studies
with {PRISM}",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "16--21",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059820",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe some practical applications
of {\em probabilistic model checking,\/} a technique
for the formal analysis of systems which exhibit
stochastic behaviour. We give an overview of a
selection of case studies carried out using the
probabilistic model checking tool PRISM, demonstrating
the wide range of application domains to which these
methods are applicable. We also illustrate several
benefits of using formal verification techniques to
analyse probabilistic systems, including: (i) that they
allow a wide range of numerical properties to be
computed accurately; and (ii) that they perform a
complete and exhaustive analysis enabling, for example,
a study of best- and worst-case scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baier:2005:PVM,
author = "Christel Baier and Frank Ciesinski and Marcus
Gr{\"o}{\ss}er",
title = "{ProbMela} and verification of {Markov} decision
processes",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "22--27",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markov decision processes (MDP) can serve as
operational model for probabilistic distributed systems
and yield the basis for model checking algorithms
against qualitative or quantitative properties. In this
paper, we summarize the main steps of a quantitative
analysis for a given MDP and formula of linear temporal
logic, give an introduction to the modelling language
ProbMela which provides a simple and intuitive way to
describe complex systems with a MDP-semantics and
present the basic features of the MDP model checker
LiQuor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jansen:2005:QMA,
author = "David N. Jansen and Holger Hermanns",
title = "{QoS} modelling and analysis with {UML} statecharts:
the {StoCharts} approach",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "28--33",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059822",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The UML is an influential and widespread notation for
high-level modelling of information processing systems.
UML statechart diagrams are a graphical language to
describe system behaviour. They constitute one of the
most intensively-used formalisms comprised by the UML.
However, statechart diagrams are lacking concepts for
describing real-time, performance, dependability and
quality of service (QoS) characteristics at a
behavioural level. This note describes a QoS-oriented
extension of UML statechart diagrams, called StoCharts.
StoCharts enhance the basic statechart formalism with
two distinguished features, both simple and easy to
understand, yet powerful enough to model a sufficiently
rich class of stochastic processes. This is illustrated
by a selection of case studies performed using
StoCharts. We review the main ingredients of StoCharts
and survey tool support and case studies performed with
the language, and place StoCharts in the context of
other extensions of statechart diagrams.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Behrmann:2005:OSU,
author = "Gerd Behrmann and Kim G. Larsen and Jacob I.
Rasmussen",
title = "Optimal scheduling using priced timed automata",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "34--40",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This contribution reports on the considerable effort
made recently towards extending and applying
well-established timed automata technology to optimal
scheduling and planning problems. The effort of the
authors in this direction has to a large extent been
carried out as part of the European projects VHS [20]
and AMETIST [16] and are available in the recently
released UPPAAL CORA [12], a variant of the real-time
verification tool UPPAAL [18, 5] specialized for
cost-optimal reachability for the extended model of
so-called priced timed automata.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{McIver:2005:ARP,
author = "Annabelle McIver and Carroll Morgan",
title = "Abstraction and refinement in probabilistic systems",
journal = j-SIGMETRICS,
volume = "32",
number = "4",
pages = "41--47",
month = mar,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1059816.1059824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We summarise a verification method for probabilistic
systems that is based on abstraction and refinement,
and extends traditional assertional styles of
verification. The approach makes extensive use of the
{\em expectation transformers of pGCL\/} [17, 16, 13],
a compact probabilistic programming language with an
associated logic of real-valued functions. Analysis of
large systems is made tractable by abstraction which,
together with algebraic and logical reasoning, results
in strong and general guarantees about
probabilistic-system properties. Although our examples
are specific (to {\em pGCL\/}), our overall goal in
this note is to advocate the hierarchical development
of probabilistic programs via levels of abstraction,
connected by refinement, and to illustrate the proof
obligations incurred by such an approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hoelzle:2005:GHL,
author = "Urs Hoelzle",
title = "{Google}: or how {I} learned to love terabytes",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "1--1",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064213",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Search is one of the most important applications used
on the internet, but it also poses some of the most
interesting challenges in computer science. Providing
high-quality search requires understanding across a
wide range of computer science disciplines, from
lower-level systems issues like computer architecture
and distributed systems to applied areas like
information retrieval, machine learning, data mining,
and user interface design. In this talk I'll share some
interesting observations and measurements obtained at
Google, and will illustrate the behind-the-scenes
pieces of infrastructure (both hardware and software)
that we've built in order to extract this information
from many terabytes of data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Massoulie:2005:CRS,
author = "Laurent Massouli{\'e} and Milan Vojnovi{\'c}",
title = "Coupon replication systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "2--13",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064215",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the study of peer-to-peer file swarming
systems {\`a} la BitTorrent, we introduce a
probabilistic model of {\em coupon replication
systems}. These systems consist of users, aiming to
complete a collection of distinct coupons. Users are
characterised by their current collection of coupons,
and leave the system once they complete their coupon
collection. The system evolution is then specified by
describing how users of distinct types meet, and which
coupons get replicated upon such encounters. For open
systems, with exogenous user arrivals, we derive
necessary and sufficient stability conditions in a
layered scenario, where encounters are between users
holding the same number of coupons. We also consider a
system where encounters are between users chosen
uniformly at random from the whole population. We show
that performance, captured by sojourn time, is
asymptotically optimal in both systems as the number of
coupon types becomes large. We also consider closed
systems with no exogenous user arrivals. In a special
scenario where users have only one missing coupon, we
evaluate the size of the population ultimately
remaining in the system, as the initial number of
users, $N$, goes to infinity. We show that this
decreases geometrically with the number of coupons,
$K$. In particular, when the ratio $ K / \log (N) $ is
above a critical threshold, we prove that this number
of left-overs is of order $ \log (\log (N))$. These
results suggest that performance of file swarming
systems does not depend critically on either altruistic
user behavior, or on load balancing strategies such as
{\em rarest first}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content distribution; file swarming; peer-to-peer",
}
@Article{Tang:2005:LTO,
author = "Chunqiang Tang and Melissa J. Buco and Rong N. Chang
and Sandhya Dwarkadas and Laura Z. Luan and Edward So
and Christopher Ward",
title = "Low traffic overlay networks with large routing
tables",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "14--25",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064216",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The routing tables of Distributed Hash Tables (DHTs)
can vary from size $ O(1) $ to $ O(n) $. Currently,
what is lacking is an analytic framework to suggest the
optimal routing table size for a given workload. This
paper (1) compares DHTs with $ O(1) $ to $ O(n) $
routing tables and identifies some good design points;
and (2) proposes protocols to realize the potential of
those good design points. We use total traffic as the
uniform metric to compare heterogeneous DHTs and
emphasize the balance between maintenance cost and
lookup cost. Assuming a node on average processes 1,000
or more lookups during its entire lifetime, our
analysis shows that large routing tables actually lead
to both low traffic and low lookup hops. These good
design points translate into one-hop routing for
systems of medium size and two-hop routing for large
systems. Existing one-hop or two-hop protocols are
based on a hierarchy. We instead demonstrate that it is
possible to achieve completely decentralized one-hop or
two-hop routing, i.e., without giving up being
peer-to-peer. We propose 1h-Calot for one-hop routing
and 2h-Calot for two-hop routing. Assuming a moderate
lookup rate, compared with DHTs that use $ O(\log n) $
routing tables, 1h-Calot and 2h-Calot save traffic by
up to 70\% while resolving lookups in one or two hops
as opposed to $ O(\log n) $ hops.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed hash table; overlay network; peer-to-peer
system",
}
@Article{Leonard:2005:LBN,
author = "Derek Leonard and Vivek Rai and Dmitri Loguinov",
title = "On lifetime-based node failure and stochastic
resilience of decentralized peer-to-peer networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "26--37",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064217",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To understand how high rates of churn and random
departure decisions of end-users affect connectivity of
P2P networks, this paper investigates resilience of
random graphs to lifetime-based node failure and
derives the expected delay before a user is forcefully
isolated from the graph and the probability that this
occurs within his/her lifetime. Our results indicate
that systems with heavy-tailed lifetime distributions
are more resilient than those with light-tailed (e.g.,
exponential) distributions and that for a given average
degree, $k$-regular graphs exhibit the highest
resilience. As a practical illustration of our results,
each user in a system with $ n = 100 $ billion peers,
30-minute average lifetime, and 1-minute
node-replacement delay can stay connected to the graph
with probability $ 1 - 1 / n$ using only 9 neighbors.
This is in contrast to 37 neighbors required under
previous modeling efforts. We finish the paper by
showing that many P2P networks are {\em almost
surely\/} (i.e., with probability $ 1 - o(1)$)
connected if they have no isolated nodes and derive a
simple model for the probability that a P2P system
partitions under churn.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Pareto; peer-to-peer; stochastic lifetime resilience",
}
@Article{Dumitriu:2005:DSR,
author = "D. Dumitriu and E. Knightly and A. Kuzmanovic and I.
Stoica and W. Zwaenepoel",
title = "Denial-of-service resilience in peer-to-peer file
sharing systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "38--49",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064218",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Peer-to-peer (p2p) file sharing systems are
characterized by highly replicated content distributed
among nodes with enormous aggregate resources for
storage and communication. These properties alone are
not sufficient, however, to render p2p networks immune
to denial-of-service (DoS) attack. In this paper, we
study, by means of analytical modeling and simulation,
the resilience of p2p file sharing systems against DoS
attacks, in which malicious nodes respond to queries
with erroneous responses. We consider the file-targeted
attacks in current use in the Internet, and we
introduce a new class of p2p-network-targeted attacks.
In file-targeted attacks, the attacker puts a large
number of corrupted versions of a {\em single\/} file
on the network. We demonstrate that the effectiveness
of these attacks is highly dependent on the clients'
behavior. For the attacks to succeed over the long
term, clients must be unwilling to share files, slow in
removing corrupted files from their machines, and quick
to give up downloading when the system is under attack.
In network-targeted attacks, attackers respond to
queries for {\em any\/} file with erroneous
information. Our results indicate that these attacks
are highly scalable: increasing the number of malicious
nodes yields a hyperexponential decrease in system
goodput, and a moderate number of attackers suffices to
cause a near-collapse of the entire system. The key
factors inducing this vulnerability are (i)
hierarchical topologies with misbehaving `supernodes,'
(ii) high path-length networks in which attackers have
increased opportunity to falsify control information,
and (iii) power-law networks in which attackers insert
themselves into high-degree points in the graph.
Finally, we consider the effects of client
counter-strategies such as randomized reply selection,
redundant and parallel download, and reputation
systems. Some counter-strategies (e.g., randomized
reply selection) provide considerable immunity to
attack (reducing the scaling from hyperexponential to
linear), yet significantly hurt performance in the
absence of an attack. Other counter-strategies yield
little benefit (or penalty). In particular, reputation
systems show little impact unless they operate with
near perfection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "denial of service; file pollution; network-targeted
attacks; peer-to-peer",
}
@Article{Moore:2005:ITC,
author = "Andrew W. Moore and Denis Zuev",
title = "{Internet} traffic classification using {Bayesian}
analysis techniques",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "50--60",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064220",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Accurate traffic classification is of fundamental
importance to numerous other network activities, from
security monitoring to accounting, and from Quality of
Service to providing operators with useful forecasts
for long-term provisioning. We apply a Na{\"\i}ve Bayes
estimator to categorize traffic by application.
Uniquely, our work capitalizes on hand-classified
network data, using it as input to a supervised
Na{\"\i}ve Bayes estimator. In this paper we illustrate
the high level of accuracy achievable with the
Na{\"\i}ve Bayes estimator. We further illustrate the
improved accuracy of refined variants of this
estimator. Our results indicate that with the simplest
of Na{\"\i}ve Bayes estimator we are able to achieve
about 65\% accuracy on per-flow classification and with
two powerful refinements we can improve this value to
better than 95\%; this is a vast improvement over
traditional techniques that achieve 50--70\%. While our
technique uses training data, with categories derived
from packet-content, all of our training and testing
was done using header-derived discriminators. We
emphasize this as a powerful aspect of our approach:
using samples of well-known traffic to allow the
categorization of traffic using commonly available
information alone.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flow classification; Internet traffic; traffic
identification",
}
@Article{Kumar:2005:DSA,
author = "Abhishek Kumar and Minho Sung and Jun (Jim) Xu and
Ellen W. Zegura",
title = "A data streaming algorithm for estimating
subpopulation flow size distribution",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "61--72",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064221",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Statistical information about the flow sizes in the
traffic passing through a network link helps a network
operator to characterize network resource usage, infer
traffic demands, detect traffic anomalies, and improve
network performance through traffic engineering.
Previous work on estimating the flow size distribution
for the {\em complete population\/} of flows has
produced techniques that either make inferences from
sampled network traffic, or use data streaming
approaches. In this work, we identify and solve a more
challenging problem of estimating the size distribution
and other statistical information about {\em arbitrary
subpopulations\/} of flows. Inferring subpopulation
flow statistics is more challenging than the complete
population counterpart, since subpopulations of
interest are often specified {\em a posteriori\/}
(i.e., after the data collection is done), making it
impossible for the data collection module to `plan in
advance'. Our solution consists of a novel mechanism
that combines data streaming with traditional packet
sampling to provide highly accurate estimates of
subpopulation flow statistics. The algorithm employs
two data collection modules operating in parallel --- a
NetFlow-like packet sampler and a streaming data
structure made up of an array of counters. Combining
the data collected by these two modules, our estimation
algorithm uses a statistical estimation procedure that
correlates and decodes the outputs (observations) from
both data collection modules to obtain flow statistics
for any arbitrary subpopulation. Evaluations of this
algorithm on real-world Internet traffic traces
demonstrate its high measurement accuracy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; EM algorithm; flow statistics;
statistical inference; traffic analysis",
}
@Article{Cohen:2005:PCL,
author = "Edith Cohen and Carsten Lund",
title = "Packet classification in large {ISPs}: design and
evaluation of decision tree classifiers",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "73--84",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064222",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet classification, although extensively studied,
is an evolving problem. Growing and changing needs
necessitate the use of larger filters with more complex
rules. The increased complexity and size pose
implementation challenges on current hardware solutions
and drive the development of software classifiers, in
particular, decision-tree based classifiers. Important
performance measures for these classifiers are time and
memory due to required high throughput and use of
limited fast memory. We analyze Tier 1 ISP data that
includes filters and corresponding traffic from over a
hundred edge routers and thousands of interfaces. We
provide a comprehensive view on packet classification
in an operational network and glean insights that help
us design more effective classification algorithms. We
propose and evaluate decision tree classifiers with
{\em common branches}. These classifiers have linear
worst-case memory bounds and require much less memory
than standard decision tree classifiers, but
nonetheless, we show that on our data have similar
average and worst-case time performance. We argue that
common-branches exploit structure that is present in
real-life data sets. We observe a strong Zipf-like
pattern in the usage of rules in a classifier, where a
very small number of rules resolves the bulk of traffic
and most rules are essentially never used. Inspired by
this observation, we propose {\em traffic-aware\/}
classifiers that obtain superior average-case and
bounded worst-case performance. Good average-case can
boost performance of software classifiers that can be
used in small to medium sized routers and are also
important for traffic analysis and traffic
engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "access control lists; decision trees; packet
filtering; routing",
}
@Article{Keys:2005:RSA,
author = "Ken Keys and David Moore and Cristian Estan",
title = "A robust system for accurate real-time summaries of
{Internet} traffic",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "85--96",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064223",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Good performance under extreme workloads and isolation
between the resource consumption of concurrent jobs are
perennial design goals of computer systems ranging from
multitasking servers to network routers. In this paper
we present a specialized system that computes multiple
summaries of IP traffic in real time and achieves
robustness and isolation between tasks in a novel way:
by automatically adapting the parameters of the
summarization algorithms. In traditional systems,
anomalous network behavior such as denial of service
attacks or worms can overwhelm the memory or CPU,
making the system produce meaningless results exactly
when measurement is needed most. In contrast, our
measurement system reacts by gracefully degrading the
accuracy of the affected summaries. The types of
summaries we compute are widely used by network
administrators monitoring the workloads of their
networks: the ports sending the most traffic, the IP
addresses sending or receiving the most traffic or
opening the most connections, etc. We evaluate and
compare many existing algorithmic solutions for
computing these summaries, as well as two new solutions
we propose here: `flow sample and hold' and `Bloom
filter tuple set counting'. Compared to previous
solutions, these new solutions offer better memory
versus accuracy tradeoffs and have more predictable
resource consumption. Finally, we evaluate the actual
implementation of a complete system that combines the
best of these algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive response; measurement; passive monitoring;
sampling; traffic estimation",
}
@Article{Choi:2005:PCW,
author = "Sunwoong Choi and Kihong Park and Chong-kwon Kim",
title = "On the performance characteristics of {WLANs}:
revisited",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "97--108",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064225",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wide-spread deployment of infrastructure WLANs has
made Wi-Fi an integral part of today's Internet access
technology. Despite its crucial role in affecting
end-to-end performance, past research has focused on
MAC protocol enhancement, analysis and simulation-based
performance evaluation without sufficient consideration
for modeling inaccuracies stemming from inter-layer
dependencies, including physical layer diversity, that
significantly impact performance. We take a fresh look
at IEEE 802.11 WLANs, and using a combination of
experiment, simulation, and analysis demonstrate its
surprisingly agile performance traits. Our main
findings are two-fold. First, contention-based MAC
throughput degrades gracefully under congested
conditions, enabled by physical layer channel diversity
that reduces the effective level of MAC contention. In
contrast, fairness and jitter significantly degrade at
a critical offered load. This duality obviates the need
for link layer flow control for throughput improvement
but necessitates traffic control for fairness and QoS.
Second, TCP-over-WLAN achieves high throughput
commensurate with that of wireline TCP under saturated
conditions, challenging the widely held perception that
TCP throughput fares poorly over WLANs when subject to
heavy contention. We show that TCP-over-WLAN prowess is
facilitated by the self-regulating actions of DCF and
TCP congestion control that jointly drive the shared
physical channel at an effective load of 2--3 wireless
stations, even when the number of active stations is
very large. Our results highlight subtle inter-layer
dependencies including the mitigating influence of
TCP-over-WLAN on dynamic rate shifting.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DCF performance; inter-layer dependence; physical
layer diversity; rate control; TCP-over-WLAN
performance",
}
@Article{Ramaiyan:2005:FPA,
author = "Venkatesh Ramaiyan and Anurag Kumar and Eitan Altman",
title = "Fixed point analysis of single cell {IEEE 802.11e}
{WLANs}: uniqueness, multistability and throughput
differentiation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "109--120",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064226",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the vector fixed point equations arising
out of the analysis of the saturation throughput of a
single cell IEEE 802.11e wireless local area network
with nodes that have different back-off parameters,
including different Arbitration InterFrame Space (AIFS)
values. We consider balanced and unbalanced solutions
of the fixed point equations arising in homogeneous and
nonhomogeneous networks. We are concerned, in
particular, with (i) whether the fixed point is
balanced within a class, and (ii) whether the fixed
point is unique. Our simulations show that when
multiple unbalanced fixed points exist in a homogeneous
system then the time behaviour of the system
demonstrates severe short term unfairness (or {\em
multistability\/}). Implications for the use of the
fixed point formulation for performance analysis are
also discussed. We provide a condition for the fixed
point solution to be balanced within a class, and also
a condition for uniqueness. We then provide an
extension of our general fixed point analysis to
capture AIFS based differentiation; again a condition
for uniqueness is established. An asymptotic analysis
of the fixed point is provided for the case in which
packets are never abandoned, and the number of nodes
goes to $ \infty $. Finally the fixed point equations
are used to obtain insights into the throughput
differentiation provided by different initial
back-offs, persistence factors, and AIFS, for finite
number of nodes, and for differentiation parameter
values similar to those in the standard.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "EDCF analysis; performance of wireless LANs; QoS in
wireless LANs; short term unfairness",
}
@Article{Lindemann:2005:MEI,
author = "Christoph Lindemann and Oliver P. Waldhorst",
title = "Modeling epidemic information dissemination on mobile
devices with finite buffers",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "121--132",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064227",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Epidemic algorithms have recently been proposed as an
effective solution for disseminating information in
large-scale peer-to-peer (P2P) systems and in mobile ad
hoc networks (MANET). In this paper, we present a
modeling approach for steady-state analysis of epidemic
dissemination of information in MANET. As major
contribution, the introduced approach explicitly
represents the spread of multiple data items, finite
buffer capacity at mobile devices and a least recently
used buffer replacement scheme. Using the introduced
modeling approach, we analyze seven degrees of
separation (7DS) as one well-known approach for
implementing P2P data sharing in a MANET using epidemic
dissemination of information. A validation of results
derived from the analytical model against simulation
shows excellent agreement. Quantitative performance
curves derived from the analytical model yield several
insights for optimizing the system design of 7DS.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical performance modeling; mobile ad hoc
networks; peer-to-peer data sharing;
performance-oriented design and evaluation studies of
distributed systems",
}
@Article{Kumar:2005:AAC,
author = "V. S. Anil Kumar and Madhav V. Marathe and Srinivasan
Parthasarathy and Aravind Srinivasan",
title = "Algorithmic aspects of capacity in wireless networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "133--144",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers two inter-related questions: (i)
Given a wireless ad-hoc network and a collection of
source-destination pairs $ (s_i, t_i) $, what is the
maximum throughput capacity of the network, i.e. the
rate at which data from the sources to their
corresponding destinations can be transferred in the
network? (ii) Can network protocols be designed that
jointly route the packets and schedule transmissions at
rates close to the maximum throughput capacity? Much of
the earlier work focused on random instances and proved
analytical lower and upper bounds on the maximum
throughput capacity. Here, in contrast, we consider
arbitrary wireless networks. Further, we study the
algorithmic aspects of the above questions: the goal is
to design provably good algorithms for arbitrary
instances. We develop analytical performance evaluation
models and distributed algorithms for routing and
scheduling which incorporate fairness, energy and
dilation (path-length) requirements and provide a
unified framework for utilizing the network close to
its maximum throughput capacity. Motivated by certain
popular wireless protocols used in practice, we also
explore `shortest-path like' path selection strategies
which maximize the network throughput. The theoretical
results naturally suggest an interesting class of
congestion aware link metrics which can be directly
{\em plugged into\/} several existing routing protocols
such as AODV, DSR, etc. We complement the theoretical
analysis with extensive simulations. The results
indicate that routes obtained using our congestion
aware link metrics consistently yield higher throughput
than hop-count based shortest path metrics.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity modeling; end-to-end scheduling; linear
programming; wireless networks",
}
@Article{Chen:2005:EEM,
author = "Zhifeng Chen and Yan Zhang and Yuanyuan Zhou and Heidi
Scott and Berni Schiefer",
title = "Empirical evaluation of multi-level buffer cache
collaboration for storage systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "145--156",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To bridge the increasing processor-disk performance
gap, buffer caches are used in both storage clients
(e.g. database systems) and storage servers to reduce
the number of slow disk accesses. These buffer caches
need to be managed effectively to deliver the
performance commensurate to the aggregate buffer cache
size. To address this problem, two paradigms have been
proposed recently to {\em collaboratively\/} manage
these buffer caches together: the {\em hierarchy-aware
caching\/} maintains the same I/O interface and is
fully transparent to the storage client software, and
the {\em aggressively-collaborative caching\/} trades
off transparency for performance and requires changes
to both the interface and the storage client software.
Before storage industry starts to implement
collaborative caching in real systems, it is crucial to
find out whether sacrificing transparency is really
worthwhile, i.e., how much can we gain by using the
aggressively-collaborative caching instead of the
hierarchy-aware caching? To accurately answer this
question, it is required to consider all possible
combinations of recently proposed local replacement
algorithms and optimization techniques in both
collaboration paradigms. Our study provides an
empirical evaluation to address the above questions.
Particularly, we have compared three
aggressively-collaborative approaches with two
hierarchy-aware approaches for four different types of
database/file I/O workloads using traces collected from
real commercial systems such as IBM DB2. More
importantly, we separate the effects of collaborative
caching from local replacement algorithms and
optimizations, and uniformly apply several recently
proposed local replacement algorithms and optimizations
to all five collaboration approaches. When appropriate
local optimizations and replacement algorithms are
uniformly applied to both hierarchy-aware and
aggressively-collaborative caching, the results
indicate that hierarchy-aware caching can deliver
similar performance as aggressively-collaborative
caching. The results show that the
aggressively-collaborative caching only provides less
than 2.5\% performance improvement on average in
simulation and 1.0\% in real system experiments over
the hierarchy-aware caching for most workloads and
cache configurations. Our sensitivity study indicates
that the performance gain of aggressively-collaborative
caching is also very small for various storage networks
and different cache configurations. Therefore,
considering its simplicity and generality,
hierarchy-aware caching is more feasible than
aggressively-collaborative caching.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "collaborative caching; database; file system; storage
system",
}
@Article{Butt:2005:PIK,
author = "Ali R. Butt and Chris Gniady and Y. Charlie Hu",
title = "The performance impact of kernel prefetching on buffer
cache replacement algorithms",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "157--168",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064231",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A fundamental challenge in improving the file system
performance is to design effective block replacement
algorithms to minimize buffer cache misses. Despite the
well-known interactions between prefetching and
caching, almost all buffer cache replacement algorithms
have been proposed and studied comparatively without
taking into account file system prefetching which
exists in all modern operating systems. This paper
shows that such kernel prefetching can have a
significant impact on the relative performance in terms
of the number of actual disk I/Os of many well-known
replacement algorithms; it can not only narrow the
performance gap but also change the relative
performance benefits of different algorithms. These
results demonstrate the importance for buffer caching
research to take file system prefetching into
consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "buffer caching; prefetching; replacement algorithms",
}
@Article{Berg:2005:FDL,
author = "Erik Berg and Erik Hagersten",
title = "Fast data-locality profiling of native execution",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "169--180",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance tools based on hardware counters can
efficiently profile the cache behavior of an
application and help software developers improve its
cache utilization. Simulator-based tools can
potentially provide more insights and flexibility and
model many different cache configurations, but have the
drawback of large run-time overhead. We present
StatCache, a performance tool based on a statistical
cache model. It has a small run-time overhead while
providing much of the flexibility of simulator-based
tools. A monitor process running in the background
collects sparse memory access statistics about the
analyzed application running natively on a host
computer. Generic locality information is derived and
presented in a code-centric and/or data-centric view.
We evaluate the accuracy and performance of the tool
using ten SPEC CPU2000 benchmarks. We also exemplify
how the flexibility of the tool can be used to better
understand the characteristics of cache-related
performance problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache behavior; profiling tool",
}
@Article{Yotov:2005:AMM,
author = "Kamen Yotov and Keshav Pingali and Paul Stodghill",
title = "Automatic measurement of memory hierarchy parameters",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "181--192",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064233",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The running time of many applications is dominated by
the cost of memory operations. To optimize such
applications for a given platform, it is necessary to
have a detailed knowledge of the memory hierarchy
parameters of that platform. In practice, this
information is poorly documented if at all. Moreover,
there is growing interest in self-tuning, autonomic
software systems that can optimize themselves for
different platforms; these systems must determine
memory hierarchy parameters automatically without human
intervention. One solution is to use micro-benchmarks
to determine the parameters of the memory hierarchy. In
this paper, we argue that existing micro-benchmarks are
inadequate, and present novel micro-benchmarks for
determining parameters of all levels of the memory
hierarchy, including registers, all data caches and the
translation look-aside buffer. We have implemented
these micro-benchmarks in a tool called X-Ray that can
be ported easily to new platforms. We present
experimental results that show that X-Ray successfully
determines memory hierarchy parameters on current
platforms, and compare its accuracy with that of
existing tools.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "autonomic systems; caches; hardware parameters;
measurement; memory hierarchy; micro-benchmarks;
optimization; self-tuning",
}
@Article{Jonckheere:2005:OIR,
author = "M. Jonckheere and J. Virtamo",
title = "Optimal insensitive routing and bandwidth sharing in
simple data networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "193--204",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064235",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many communication systems can be efficiently modelled
using queueing networks with a stationary distribution
that is insensitive to detailed traffic characteristics
and depends on arrival rates and mean service
requirements only. This robustness enables simple
engineering rules and is thus of considerable practical
interest. In this paper we extend previous results by
relaxing the usual assumption of static routing and
balanced service rates to account for both dynamic
capacity allocation and dynamic load balancing. This
relaxation is necessary to model systems like grid
computing, for instance. Our results identify joint
dynamic allocation and routing policies for single
input reversible networks that are optimal for a wide
range of performance metrics. A simple two-pass
algorithm is presented for finding the optimal policy.
The derived analytical results are applied in a number
of simple numerical examples that illustrate their
modelling potential.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bandwidth allocation; insensitivity; joint
optimization; routing",
}
@Article{Wierman:2005:NIB,
author = "Adam Wierman and Mor Harchol-Balter and Takayuki
Osogami",
title = "Nearly insensitive bounds on {SMART} scheduling",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "205--216",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064236",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We define the class of SMART scheduling policies.
These are policies that bias towards jobs with small
remaining service times, jobs with small original
sizes, or both, with the motivation of minimizing mean
response time and/or mean slowdown. Examples of SMART
policies include PSJF, SRPT, and hybrid policies such
as RS (which biases according to the product of the
remaining size and the original size of a job). For many
policies in the SMART class, the mean response time and
mean slowdown are not known or have complex
representations involving multiple nested integrals,
making evaluation difficult. In this work, we prove
three main results. First, for all policies in the
SMART class, we prove simple upper and lower bounds on
mean response time. Second, we show that all policies
in the SMART class, surprisingly, have very similar
mean response times. Third, we show that the response
times of SMART policies are largely insensitive to the
variability of the job size distribution. In
particular, we focus on the SRPT and PSJF policies and
prove insensitive bounds in these cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "M/G/1; preemptive shortest job first; processor
sharing; PS; PSJF; response time; scheduling; shortest
remaining processing time; SMART; SRPT",
}
@Article{Kortebi:2005:ENA,
author = "A. Kortebi and L. Muscariello and S. Oueslati and J.
Roberts",
title = "Evaluating the number of active flows in a scheduler
realizing fair statistical bandwidth sharing",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "217--228",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064237",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite its well-known advantages, per-flow fair
queueing has not been deployed in the Internet mainly
because of the common belief that such scheduling is
not scalable. The objective of the present paper is to
demonstrate using trace simulations and analytical
evaluations that this belief is misguided. We show that
although the number of flows {\em in progress\/}
increases with link speed, the number that needs
scheduling at any moment is largely independent of this
rate. The number of such {\em active\/} flows is a
random process typically measured in hundreds even
though there may be tens of thousands of flows in
progress. The simulations are performed using traces
from commercial and research networks with quite
different traffic characteristics. Analysis is based on
models for balanced fair statistical bandwidth sharing
and applies properties of queue busy periods to explain
the observed behaviour.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical traffic model; fair queueing; statistical
bandwidth sharing; trace simulations",
}
@Article{Wierman:2005:CSP,
author = "Adam Wierman and Mor Harchol-Balter",
title = "Classifying scheduling policies with respect to higher
moments of conditional response time",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "229--240",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064238",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In addition to providing small mean response times,
modern applications seek to provide users predictable
service and, in some cases, Quality of Service (QoS)
guarantees. In order to understand the predictability
of response times under a range of scheduling policies,
we study the conditional variance in response times
seen by jobs of different sizes. We define a metric and
a criterion that distinguish between contrasting
functional behaviors of conditional variance, and we
then classify large groups of scheduling policies. In
addition to studying the conditional variance of
response times, we also derive metrics appropriate for
comparing higher conditional moments of response time
across job sizes. We illustrate that common statistics
such as raw and central moments are not appropriate
when comparing higher conditional moments of response
time. Instead, we find that cumulant moments should be
used.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cumulants; FB; foreground-background; LAS; least
attained service; M/G/1; predictability; processor
sharing; PS; PSJF; response time; scheduling; SET;
shortest job first; shortest remaining processing time;
SRPT; variance",
}
@Article{Jiang:2005:WIT,
author = "Hao Jiang and Constantinos Dovrolis",
title = "Why is the {Internet} traffic bursty in short time
scales?",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "241--252",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064240",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Internet traffic exhibits multifaceted burstiness and
correlation structure over a wide span of time scales.
Previous work analyzed this structure in terms of
heavy-tailed session characteristics, as well as TCP
timeouts and congestion avoidance, in relatively long
time scales. We focus on shorter scales, typically less
than 100--1000 milliseconds. Our objective is to
identify the actual mechanisms that are responsible for
creating bursty traffic in those scales. We show that
TCP self-clocking, joint with queueing in the network,
can shape the packet interarrivals of a TCP connection
in a two-level ON-OFF pattern. This structure creates
strong correlations and burstiness in time scales that
extend up to the Round-Trip Time (RTT) of the
connection. This effect is more important for bulk
transfers that have a large bandwidth-delay product
relative to their window size. Also, the aggregation of
many flows, without rescaling their packet
interarrivals, does not converge to a Poisson stream,
as one might expect from classical superposition
results. Instead, the burstiness in those scales can be
significantly reduced by TCP pacing. In particular, we
focus on the importance of the minimum pacing timer,
and show that a 10-millisecond timer would be too
coarse for removing short-scale traffic burstiness,
while a 1-millisecond timer would be sufficient to make
the traffic almost as smooth as a Poisson stream in
sub-RTT scales.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "burstiness; ON-OFF model; TCP pacing; TCP
self-clocking; traffic modeling; wavelet-based
multiresolution analysis",
}
@Article{Roughan:2005:FBA,
author = "Matthew Roughan",
title = "Fundamental bounds on the accuracy of network
performance measurements",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "253--264",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064241",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers the basic problem of `how
accurate can we make Internet performance
measurements'. The answer is somewhat counter-intuitive
in that there are bounds on the accuracy of such
measurements, no matter how many probes we can use in a
given time interval, and thus arises a type of
Heisenberg inequality describing the bounds in our
knowledge of the performance of a network. The results
stem from the fact that we cannot make independent
measurements of a system's performance: all such
measures are correlated, and these correlations reduce
the efficacy of measurements. The degree of correlation
is also strongly dependent on system load. The result
has important practical implications that reach beyond
the design of Internet measurement experiments, into
the design of network protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "error estimation; Internet measurement; load
balancing; measurement planning; network performance",
}
@Article{Jain:2005:EEE,
author = "Manish Jain and Constantinos Dovrolis",
title = "End-to-end estimation of the available bandwidth
variation range",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "265--276",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064242",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The available bandwidth (avail-bw) of a network path
is an important performance metric and its end-to-end
estimation has recently received significant attention.
Previous work focused on the estimation of the average
avail-bw, ignoring the significant variability of this
metric in different time scales. In this paper, we show
how to estimate a given percentile of the avail-bw
distribution at a user-specified time scale. If two
estimated percentiles cover the bulk of the
distribution (say 10\% to 90\%), the user can obtain a
practical estimate for the avail-bw variation range. We
present two estimation techniques. The first is
iterative and non-parametric, meaning that it is more
appropriate for very short time scales (typically less
than 100ms), or in bottlenecks with limited flow
multiplexing (where the avail-bw distribution may be
non-Gaussian). The second technique is parametric,
because it assumes that the avail-bw follows the
Gaussian distribution, and it can produce an estimate
faster because it is not iterative. The two techniques
have been implemented in a measurement tool called
Pathvar. Pathvar can track the avail-bw variation range
within 10--20\%, even under non-stationary conditions.
Finally, we identify four factors that play a crucial
role in the variation range of the avail-bw: traffic
load, number of competing flows, rate of competing
flows, and of course the measurement time scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active measurement; bandwidth estimation; network
measurement tools; Pathvar; traffic variability",
}
@Article{Chiang:2005:NUM,
author = "Mung Chiang and J. W. Lee and R. Calderbank and D.
Palomar and M. Fazel",
title = "Network utility maximization with nonconcave, coupled,
and reliability-based utilities",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "277--277",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064246",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network Utility Maximization (NUM) has significantly
extended the classical network flow problem and
provided an emerging framework to design resource
allocation algorithms such as TCP congestion control
and to understand layering as optimization
decomposition. We present a summary of very recent
results in the theory and applications of NUM. We show
new distributed algorithms that converge to the
globally optimal rate allocation for NUM problems with
nonconcave utility functions representing inelastic
flows, with coupled utility functions representing
interference effects or hybrid social-selfish
utilities, and with rate-reliability tradeoff through
adaptive channel coding in the physical layer. We
conclude by discussing how do different decompositions
of a generalized NUM problem correspond to different
layering architectures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiang:2005:OCC,
author = "Mung Chiang and Steven Low",
title = "Optimization and Control of Communication Networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "277--277",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064244",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, there has been a surge in research
activities that utilize the power of recent
developments in nonlinear optimization to tackle a wide
scope of work in the analysis and design of
communication systems, touching every layer of the
layered network architecture, and resulting in both
intellectual and practical impacts significantly beyond
the earlier frameworks. These research activities are
driven by both new demands in the areas of
communications and networking, and new tools emerging
from optimization theory. Such tools include new
developments of powerful theories and highly efficient
computational algorithms for nonlinear convex
optimization, as well as global solution methods and
relaxation techniques for nonconvex optimization.
Optimization theory can be used to analyze, interpret,
or design a communication system, for both
forward-engineering and reverse-engineering. Over the
last few years, it has been successfully applied to a
wide range of communication systems, from the high
speed Internet core to wireless networks, from coding
and equalization to broadband access, and from
information theory to network topology models. Some of
the theoretical advances have also been put into
practice and started making visible impacts, including
new versions of TCP congestion control, power control
and scheduling algorithms in wireless networks, and
spectrum management in DSL broadband access networks.
Under the theme of optimization and control of
communication networks, this Hot Topic Session consists
of five invited talks covering a wide range of issues,
including protocols, pricing, resource allocation,
cross layer design, traffic engineering in the
Internet, optical transport networks, and wireless
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Low:2005:OMI,
author = "Steven Low and J. Doyle and L. Li and A. Tang and J.
Wang",
title = "Optimization model of {Internet} protocols",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "277--277",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064245",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Layered architecture is one of the most fundamental
and influential structures of network design. Can we
integrate the various protocol layers into a single
coherent theory by regarding them as carrying out an
asynchronous distributed primal-dual computation over
the network to implicitly solve a global optimization
problem? Different layers iterate on different subsets
of the decision variables using local information to
achieve individual optimalities, but taken together,
these local algorithms attempt to achieve a global
objective. Such a theory will expose the
interconnection between protocol layers and can be used
to study rigorously the performance tradeoff in
protocol layering as different ways to distribute a
centralized computation. In this talk, we describe some
preliminary work towards this goal and discuss some of
the difficulties of this approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mitra:2005:JPN,
author = "Debasis Mitra",
title = "Joint pricing-network design and stochastic traffic
engineering to manage demand uncertainty",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "278--278",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064247",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I will describe two networking models, together with
their optimization techniques, that span several time
scales. In the longest time scale, where the goal is
capacity planning, I will describe the work of
Bienstock, Raskina, Saniee and Wang that considers
joint pricing and network design of optical transport
networks. Technological innovations are yielding
sharply decreasing unit costs. There is also empirical
evidence that suggests that the elasticity of bandwidth
demand to price is high. Integrating these features in
a unified profit-maximizing model leads to a
large-scale nonlinear optimization problem. In this
work, efficient solution techniques are developed to
maximize the carrier's net present value with respect
to pricing strategies and investment decisions for
technology acquisitions. In the work of Mitra and Wang
the time scale is shorter, the network infrastructure
is fixed, and a model for stochastic traffic
engineering is given in which the optimization is with
respect to bandwidth provisioning and route selection.
Traffic demands are uncertain, and the objective is to
maximize a risk-adjusted measure of network revenue
that is generated by serving demands. Considerable
attention is given to the appropriate measure of risk
in the network model. Risk-mitigation strategies are
also advanced. The optimization model, which is based
on mean-risk analysis, enables a service provider to
maximize a combined measure of mean revenue and revenue
risk. The conditions under which the optimization
problem is an instance of convex programming are
obtained. The solution is shown to satisfy the
stochastic efficiency criterion asymptotically. The
efficient frontier, which is the set of Pareto optimal
pairs of mean revenue and revenue risk, is obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Musacchio:2005:AFR,
author = "John Musacchio and Jean Walrand",
title = "Achieving fair rates with ingress policing",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "278--278",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064249",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a simple ingress policing scheme for a
stochastic queuing network that uses a round-robin
service discipline, and derive conditions under which
the flow rates approach a max-min fair share
allocation. The scheme works as follows: Whenever any
of a flow's queues exceeds a policing threshold, the
network discards that flow's arriving packets at the
network ingress, and does so until all of that flow's
queues fall below their thresholds. To prove our
results, we use previously known results relating the
stability of a queuing system to the stability of its
fluid limit and extend these results to relate the flow
rates of the stochastic system to those of a
corresponding fluid model. In particular, we consider
the fluid limit of a sequence of queuing networks with
increasing thresholds. Using a Lyapunov function
derived from the fluid limits, we find that as the
policing thresholds are increased the state of the
stochastic system is attracted to a relatively smaller
and smaller neighborhood surrounding the equilibrium of
the fluid model. We then show how this property implies
that the achieved flow rates approach the max-min rates
predicted by the fluid model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shroff:2005:OBA,
author = "Ness Shroff and Xiaojun Lin",
title = "An optimization based approach for cross-layer design
in wireless communication networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "278--278",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064248",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this talk we study the issue of cross-layer design
for rate control in multihop wireless networks. We have
developed an optimal cross-layered rate control scheme
that jointly computes both the rate allocation and the
stabilizing schedule that controls the resources at the
underlying layers. However, the scheduling component in
this optimal cross-layered rate control scheme has to
solve a complex global optimization problem at each
time, and is hence too computationally expensive for
online implementation. Thus, we study the impact on the
performance of cross-layer rate control if the network
can only use an imperfect (and potentially distributed)
scheduling component that is easier to implement. We
study scenarios with both fixed number of users as well
as when the number of users change due to arrivals and
departures in the system. In each case, we establish
desirable results on the performance bounds of
cross-layered rate control with imperfect scheduling.
Our cross-layered approach provides provably better
performance bounds when compared with a layered
approach (that does not design rate control and
scheduling together). The insights drawn from our
analyses also enable us to design a fully distributed
cross-layered rate control and scheduling algorithm
under a restrictive interference model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2005:NSC,
author = "Florin Ciucu and Almut Burchard and J{\"o}rg
Liebeherr",
title = "A network service curve approach for the stochastic
analysis of networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "279--290",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064251",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The stochastic network calculus is an evolving new
methodology for backlog and delay analysis of networks
that can account for statistical multiplexing gain.
This paper advances the stochastic network calculus by
deriving a network service curve, which expresses the
service given to a flow by the network as a whole in
terms of a probabilistic bound. The presented network
service curve permits the calculation of statistical
end-to-end delay and backlog bounds for broad classes
of arrival and service distributions. The benefits of
the derived service curve are illustrated for the
exponentially bounded burstiness (EBB) traffic model.
It is shown that end-to-end performance measures
computed with a network service curve are bounded by $
O(H \log H) $, where $H$ is the number of nodes
traversed by a flow. Using currently available
techniques that compute end-to-end bounds by adding
single node results, the corresponding performance
measures are bounded by $ O(H^3)$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network service curve; quality-of-service; stochastic
network calculus",
}
@Article{Urgaonkar:2005:AMM,
author = "Bhuvan Urgaonkar and Giovanni Pacifici and Prashant
Shenoy and Mike Spreitzer and Asser Tantawi",
title = "An analytical model for multi-tier {Internet} services
and its applications",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "291--302",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064252",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Since many Internet applications employ a multi-tier
architecture, in this paper, we focus on the problem of
analytically modeling the behavior of such
applications. We present a model based on a network of
queues, where the queues represent different tiers of
the application. Our model is sufficiently general to
capture (i) the behavior of tiers with significantly
different performance characteristics and (ii)
application idiosyncrasies such as session-based
workloads, concurrency limits, and caching at
intermediate tiers. We validate our model using real
multi-tier applications running on a Linux server
cluster. Our experiments indicate that our model
faithfully captures the performance of these
applications for a number of workloads and
configurations. For a variety of scenarios, including
those with caching at one of the application tiers, the
average response times predicted by our model were
within the 95\% confidence intervals of the observed
average response times. Our experiments also
demonstrate the utility of the model for dynamic
capacity provisioning, performance prediction,
bottleneck identification, and session policing. In one
scenario, where the request arrival rate increased from
less than 1500 to nearly 4200 requests/min, a dynamic
provisioning technique employing our model was able to
maintain response time targets by increasing the
capacity of two of the application tiers by factors of
2 and 3.5, respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet application; MVA algorithm; queuing model",
}
@Article{Chen:2005:MSE,
author = "Yiyu Chen and Amitayu Das and Wubi Qin and Anand
Sivasubramaniam and Qian Wang and Natarajan Gautam",
title = "Managing server energy and operational costs in
hosting centers",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "303--314",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064253",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The growing cost of tuning and managing computer
systems is leading to out-sourcing of commercial
services to hosting centers. These centers provision
thousands of dense servers within a relatively small
real-estate in order to host the applications/services
of different customers who may have been assured by a
service-level agreement (SLA). Power consumption of
these servers is becoming a serious concern in the
design and operation of the hosting centers. The
effects of high power consumption manifest not only in
the costs spent in designing effective cooling systems
to ward off the generated heat, but in the cost of
electricity consumption itself. It is crucial to deploy
power management strategies in these hosting centers to
lower these costs towards enhancing profitability. At
the same time, techniques for power management that
include shutting down these servers and/or modulating
their operational speed, can impact the ability of the
hosting center to meet SLAs. In addition, repeated
on-off cycles can increase the wear-and-tear of server
components, incurring costs for their procurement and
replacement. This paper presents a formalism to this
problem, and proposes three new online solution
strategies based on steady state queuing analysis,
feedback control theory, and a hybrid mechanism
borrowing ideas from these two. Using real web server
traces, we show that these solutions are more adaptive
to workload behavior when performing server
provisioning and speed control than earlier heuristics
towards minimizing operational costs while meeting the
SLAs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy management; feedback control; performance
modeling; server provisioning",
}
@Article{Ruan:2005:EIS,
author = "Yaoping Ruan and Vivek S. Pai and Erich Nahum and John
M. Tracey",
title = "Evaluating the impact of simultaneous multithreading
on network servers using real hardware",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "315--326",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper examines the performance of simultaneous
multithreading (SMT) for network servers using actual
hardware, multiple network server applications, and
several workloads. Using three versions of the Intel
Xeon processor with Hyper-Threading, we perform
macroscopic analysis as well as microarchitectural
measurements to understand the origins of the
performance bottlenecks for SMT processors in these
environments. The results of our evaluation suggest
that the current SMT support in the Xeon is application
and workload sensitive, and may not yield significant
benefits for network servers. In general, we find that
enabling SMT on real hardware usually produces only
slight performance gains, and can sometimes lead to
performance loss. In the uniprocessor case, previous
studies appear to have neglected the OS overhead in
switching from a uniprocessor kernel to an SMT-enabled
kernel. The performance loss associated with such
support is comparable to the gains provided by SMT. In
the 2-way multiprocessor case, the higher number of
memory references from SMT often causes the memory
system to become the bottleneck, offsetting any
processor utilization gains. This effect is compounded
by the growing gap between processor speeds and memory
latency. In trying to understand the large gains shown
by simulation studies, we find that while the general
trends for microarchitectural behavior agree with real
hardware, differences in sizing assumptions and
performance models yield much more optimistic benefits
for SMT than we observe.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network server; simultaneous multithreading (SMT)",
}
@Article{Donnet:2005:EAL,
author = "Benoit Donnet and Philippe Raoult and Timur Friedman
and Mark Crovella",
title = "Efficient algorithms for large-scale topology
discovery",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "327--338",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064256",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There is a growing interest in discovery of internet
topology at the interface level. A new generation of
highly distributed measurement systems is currently
being deployed. Unfortunately, the research community
has not examined the problem of how to perform such
measurements efficiently and in a network-friendly
manner. In this paper we make two contributions toward
that end. First, we show that standard topology
discovery methods (e.g., skitter) are quite
inefficient, repeatedly probing the same interfaces.
This is a concern, because when scaled up, such methods
will generate so much traffic that they will begin to
resemble DDoS attacks. We measure two kinds of
redundancy in probing (intra- and inter-monitor) and
show that both kinds are important. We show that
straightforward approaches to addressing these two
kinds of redundancy must take opposite tacks, and are
thus fundamentally in conflict. Our second contribution
is to propose and evaluate Doubletree, an algorithm
that reduces both types of redundancy simultaneously on
routers and end systems. The key ideas are to exploit
the tree-like structure of routes to and from a single
point in order to guide when to stop probing, and to
probe each path by starting near its midpoint. Our
results show that Doubletree can reduce both types of
measurement load on the network dramatically, while
permitting discovery of nearly the same set of nodes
and links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cooperative systems; network topology; traceroutes",
}
@Article{Mao:2005:LPI,
author = "Z. Morley Mao and Lili Qiu and Jia Wang and Yin
Zhang",
title = "On {AS}-level path inference",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "339--349",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ability to discover the AS-level path between two
end-points is valuable for network diagnosis,
performance optimization, and reliability enhancement.
Virtually all existing techniques and tools for path
discovery require direct access to the source. However,
the uncooperative nature of the Internet makes it
difficult to get direct access to any remote end-point.
Path inference becomes challenging when we have no
access to the source or the destination. Moreover even
when we have access to the source and know the forward
path, it is nontrivial to infer the reverse path, since
the Internet routing is often asymmetric. In this
paper, we explore the feasibility of AS-level path
inference without direct access to either end-points.
We describe {\em RouteScope\/} --- a tool for inferring
AS-level paths by finding the shortest policy paths in
an AS graph obtained from BGP tables collected from
multiple vantage points. We identify two main factors
that affect the path inference accuracy: the accuracy
of AS relationship inference and the ability to
determine the first AS hop. To address the issues, we
propose two novel techniques: a new AS relationship
inference algorithm, and a novel scheme to infer the
first AS hop by exploiting the TTL information in IP
packets. We evaluate the effectiveness of {\em
RouteScope\/} using both BGP tables and the AS paths
collected from public BGP gateways. Our results show
that it achieves 70\%--88\% accuracy in path
inference.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "AS-level path; border gateway protocol; internet
routing; network topology",
}
@Article{Zhao:2005:DSA,
author = "Qi (George) Zhao and Abhishek Kumar and Jia Wang and
Jun (Jim) Xu",
title = "Data streaming algorithms for accurate and efficient
measurement of traffic and flow matrices",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "350--361",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1071690.1064258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The traffic volume between origin/destination (OD)
pairs in a network, known as traffic matrix, is
essential for efficient network provisioning and
traffic engineering. Existing approaches of estimating
the traffic matrix, based on statistical inference
and/or packet sampling, usually cannot achieve very
high estimation accuracy. In this work, we take a brand
new approach in attacking this problem. We propose a
novel data streaming algorithm that can process traffic
stream at very high speed (e.g., 40 Gbps) and produce
traffic digests that are orders of magnitude smaller
than the traffic stream. By correlating the digests
collected at any OD pair using Bayesian statistics, the
volume of traffic flowing between the OD pair can be
accurately determined. We also establish principles and
techniques for optimally combining this streaming
method with sampling, when sampling is necessary due to
stringent resource constraints. In addition, we propose
another data streaming algorithm that estimates {\em
flow matrix}, a finer-grained characterization than
traffic matrix. Flow matrix is concerned with not only
the total traffic between an OD pair (traffic matrix),
but also how it splits into flows of various sizes.
Through rigorous theoretical analysis and extensive
synthetic experiments on real Internet traffic, we
demonstrate that these two algorithms can produce very
accurate estimation of traffic matrix and flow matrix
respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; network measurement; sampling;
statistical inference; traffic matrix",
}
@Article{Soule:2005:TMB,
author = "Augustin Soule and Anukool Lakhina and Nina Taft and
Konstantina Papagiannaki and Kave Salamatian and
Antonio Nucci and Mark Crovella and Christophe Diot",
title = "Traffic matrices: balancing measurements, inference
and modeling",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "362--373",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic matrix estimation is well-studied, but in
general has been treated simply as a statistical
inference problem. In practice, however, network
operators seeking traffic matrix information have a
range of options available to them. Operators can
measure traffic flows directly; they can perform
partial flow measurement, and infer missing data using
models; or they can perform no flow measurement and
infer traffic matrices directly from link counts. The
advent of practical flow measurement makes the study of
these tradeoffs more important. In particular, an
important question is whether judicious modeling,
combined with partial flow measurement, can provide
traffic matrix estimates that are significantly better
than previous methods at relatively low cost. In this
paper we make a number of contributions toward
answering this question. First, we provide a taxonomy
of the kinds of models that may make use of partial
flow measurement, based on the nature of the
measurements used and the spatial, temporal, or
spatio-temporal correlation exploited. We then evaluate
estimation methods which use each kind of model. In the
process we propose and evaluate new methods, and
extensions to methods previously proposed. We show
that, using such methods, small amounts of traffic flow
measurements can have significant impacts on the
accuracy of traffic matrix estimation, yielding results
much better than previous approaches. We also show that
different methods differ in their bias and variance
properties, suggesting that different methods may be
suited to different applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Internet traffic matrix estimation; Kalman filtering;
principal components analysis; statistical inference;
traffic characterization",
}
@Article{Ganeriwal:2005:RAT,
author = "Saurabh Ganeriwal and Deepak Ganesan and Mark Hansen
and Mani B. Srivastava and Deborah Estrin",
title = "Rate-adaptive time synchronization for long-lived
sensor networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "374--375",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064261",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Time synchronization is critical to sensor networks at
many layers of its design and enables better
duty-cycling of the radio, accurate localization,
beamforming and other collaborative signal processing.
While there has been significant work in sensor network
synchronization, measurement based studies have been
restricted to very short-term (few minutes) datasets
and have focused on obtaining accurate instantaneous
synchronization. Long-term synchronization has
typically been handled by periodic re-synchronization
schemes with beacon intervals of a few minutes based on
the assumption that long-term drift is too hard to
model and predict. Thus, none of this work exploits the
temporally correlated behavior of the clock drift. Yet,
there are incredible energy gains to be achieved from
better modeling and prediction of long-term drift that
can provide bounds on long-term synchronization error
across a sensor network. Better synchronization can
lead to significantly lower duty-cycles of the radio,
simplify signal processing and can enable an order of
magnitude greater lifetime than current techniques. We
measure, evaluate and analyze in-depth the long-term
behavior of synchronization skew and drift on typical
Mica sensor nodes and develop an efficient long-term
time synchronization protocol. We use four real time
data sets gathered over periods of 12--30 hours in
different environmental conditions to study the
interplay between three key parameters that influence
long-term synchronization --- synchronization rate,
history of past synchronization beacons and the
estimation scheme. We use this measurement-based study
to design an online adaptive time-synchronization
algorithm that can adapt to changing clock drift and
environmental conditions while achieving
application-specified precision with very high
probability. We find that our algorithm achieves
between one and two orders of magnitude improvement in
energy efficiency over currently available
time-synchronization approaches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clock drift; sensor networks; time synchronization",
}
@Article{Wang:2005:IPS,
author = "An-I A. Wang and Peter Reiher and Geoff Kuenning",
title = "Introducing permuted states for analyzing conflict
rates in optimistic replication",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "376--377",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064262",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical modeling; conflict rates; optimistic
replication; permuted states; simulation",
}
@Article{Mickens:2005:PNA,
author = "James W. Mickens and Brian D. Noble",
title = "Predicting node availability in peer-to-peer
networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "378--379",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unlike the well-administered servers in traditional
distributed systems, machines in peer-to-peer networks
have widely varying levels of availability. Accurate
modeling of node uptime is crucial for predicting
per-machine resource burdens and selecting appropriate
data replication strategies. In this research project,
we improve upon the accuracy of previous peer-to-peer
availability models, which are often too conservative
to dynamically predict system availability at a
fine-grained level. We test our predictors on
availability traces from the PlanetLab distributed test
bed and the Microsoft corporate network. Each trace has
a distinct predictability profile, and we explain these
differences by examining the fundamental uptime classes
contained in each trace. We also show how
availability-guided replica placement reduces the
amount of object copying in a distributed data store.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "availability prediction; data availability;
distributed object stores; distributed system
simulation; machine availability",
}
@Article{Qiu:2005:TMW,
author = "Lili Qiu and Paramvir Bahl and Ananth Rao and Lidong
Zhou",
title = "Troubleshooting multihop wireless networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "380--381",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064264",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Effective network troubleshooting is critical for
maintaining efficient and reliable network operation.
Troubleshooting is especially challenging in multihop
wireless networks because the behavior of such networks
depends on complicated interactions between many
unpredictable factors such as RF noise, signal
propagation, node interference, and traffic flows. In
this paper we propose a new direction for research on
fault diagnosis in wireless networks. Specifically, we
present a diagnostic system that employs trace-driven
simulations to detect faults and perform root cause
analysis. We apply this approach to diagnose
performance problems caused by packet dropping, link
congestion, external noise, and MAC misbehavior. In a
25 node multihop wireless network, we are able to
diagnose over 10 simultaneous faults of multiple types
with more than 80\% coverage. Our framework is general
enough for a wide variety of wireless and wired
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multihop wireless networks; network diagnosis; network
management; simulation",
}
@Article{Raz:2005:FOM,
author = "David Raz and Benjamin Avi-Itzhak and Hanoch Levy",
title = "Fair operation of multi-server and multi-queue
systems",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "382--383",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064265",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work aims at studying the fairness of multi-queue
and multi-server queueing systems. We deal with the
issues of queue-multiplicity, queue joining policy and
queue jockeying and use a quantitative measure (RAQFM)
to evaluate them. Our results yield the relative
fairness of the mechanisms as a function of the system
configuration and parameters. Practitioners can use
these results to {\em quantitatively\/} account for
system fairness and to weigh efficiency aspects versus
fairness aspects in designing and controlling their
queueing systems. In particular, we quantitatively
demonstrate that: (1) Joining the shortest queue
increases fairness, (2) A single `combined' queue
system is more fair than `separate' (multi) queue
system and (3) Jockeying from the head of a queue is
more fair than jockeying from its tail.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; FCFS; job scheduling; multi-queue;
multi-server; resource allocation; unfairness",
}
@Article{Anderson:2005:DSA,
author = "Eric Anderson and Dirk Beyer and Kamalika Chaudhuri
and Terence Kelly and Norman Salazar and Cipriano
Santos and Ram Swaminathan and Robert Tarjan and Janet
Wiener and Yunhong Zhou",
title = "Deadline scheduling for animation rendering",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "384--385",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064266",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "animation rendering; deadline scheduling; simulation",
}
@Article{He:2005:SSP,
author = "Simin He and Shutao Sun and Wei Zhao and Yanfeng Zheng
and Wen Gao",
title = "Smooth switching problem in buffered crossbar
switches",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "386--387",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064267",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalability considerations drive the switch fabric
design to evolve from output queueing to input queueing
and further to combined input and crosspoint queueing
(CICQ). However, few CICQ switches are known with
guaranteed quality of service, and credit-based flow
control induces a scalability bottleneck. In this
paper, we propose a novel CICQ switch called the
smoothed buffered crossbar or sBUX, based on a new
design objective of smoothness and on a new rate-based
flow control scheme called the smoothed multiplexer or
sMUX. It is proved that with a buffer of just four
cells at each crosspoint, sBUX can utilize 100\% of the
switch capacity to provide deterministic guarantees of
bandwidth and fairness, delay and jitter bounds for
each flow. In particular, neither credit-based flow
control nor speedup is used, and arbitrary
fabric-internal latency is allowed between line cards
and the switch core.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "buffered crossbar; CICQ; scheduling; smoothness;
switch",
}
@Article{He:2005:PTT,
author = "Qi He and Constantinos Dovrolis and Mostafa Ammar",
title = "Prediction of {TCP} throughput: formula-based and
history-based methods",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "388--389",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064268",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chua:2005:SFE,
author = "David Chua and Eric D. Kolaczyk and Mark Crovella",
title = "A statistical framework for efficient monitoring of
end-to-end network properties",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "390--391",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064269",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network service providers and customers are often
concerned with aggregate performance measures that span
multiple network paths. Unfortunately, forming such
network-wide measures can be difficult, due to the
issues of scale involved. As a result, it is of
interest to explore the feasibility of methods that
dramatically reduce the number of paths measured in
such situations while maintaining acceptable accuracy.
In previous work [4] we have proposed a statistical
framework for efficiently addressing this problem. The
key to our method lies in the observation and
exploitation of the fact that network paths show
significant redundancy (sharing of common links). We now
make three contributions in [3]: (1) we generalize the
framework to make it more immediately applicable to
network measurements encountered in practice; (2) we
demonstrate that the observed path redundancy upon
which our method is based is robust to variation in key
network conditions and characteristics, including the
presence of link failures; and (3) we show how the
framework may be applied to address three practical
problems of interest to network providers and
customers, using data from an operating network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "algorithms; networking; statistical analysis",
}
@Article{Zhu:2005:TSA,
author = "Ningning Zhu and Jiawu Chen and Tzi-cker Chiueh and
Daniel Ellard",
title = "{TBBT}: scalable and accurate trace replay for file
server evaluation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "392--393",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064270",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aging; benchmarks; file system evaluation; NFS; trace
play",
}
@Article{Sarat:2005:UAD,
author = "Sandeep Sarat and Vasileios Pappas and Andreas
Terzis",
title = "On the use of anycast in {DNS}",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "394--395",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064271",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the initial results from our evaluation
study on the performance implications of anycast in
DNS, using four anycast servers deployed at top-level
DNS zones. Our results show that 15\% to 55\% of the
queries sent to an anycast group, are answered by the
topologically closest server and at least 10\% of the
queries experience an additional delay in the order of
100ms. While increased availability is one of the
supposed advantages of anycast, we found that outages
can last up to multiple minutes, mainly due to slow BGP
convergence. On the other hand, the number of outages
observed was fairly small, suggesting that anycast
provides a generally stable service.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mudigonda:2005:MMA,
author = "Jayaram Mudigonda and Harrick M. Vin and Raj
Yavatkar",
title = "Managing memory access latency in packet processing",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "396--397",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064272",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this study, we refute the popular belief [1,2] that
packet processing does not benefit from data-caching.
We show that a small data-cache of 8KB can bring down
the packet processing time by as much as 50-90\%, while
reducing the off-chip memory bandwidth usage by about
60-95\%. We also show that, unlike general-purpose
computing, packet processing, due to its
memory-intensive nature, cannot rely exclusively on
data-caching to eliminate the memory bottleneck
completely.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data-caches; multithreading; network processors",
}
@Article{Bharambe:2005:SOB,
author = "Ashwin R. Bharambe and Cormac Herley and Venkata N.
Padmanabhan",
title = "Some observations on {BitTorrent} performance",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "398--399",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064273",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a simulation-based study of
BitTorrent. Our results confirm that BitTorrent
performs near-optimally in terms of uplink bandwidth
utilization and download time, except under certain
extreme conditions. On fairness, however, our work
shows that low bandwidth peers systematically download
more than they upload to the network when high
bandwidth peers are present. We find that the {\em
rate-based\/} tit-for-tat policy is not effective in
preventing unfairness. We show how simple changes to
the tracker and a stricter, {\em block-based
tit-for-tat policy}, greatly improves fairness, while
maintaining high utilization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bandwidth utilization; BitTorrent; fairness",
}
@Article{Machiraju:2005:TPC,
author = "Sridhar Machiraju and Darryl Veitch and Fran{\c{c}}ois
Baccelli and Antonio Nucci and Jean C. Bolot",
title = "Theory and practice of cross-traffic estimation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "400--401",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064274",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Active probing heuristics are usually based on queuing
systems. However, a rigorous probabilistic treatment of
probing methods has been lacking. For instance, it is
not known even in principle, what can and cannot be
measured in general, nor the true limitations of
existing methods. We provide a probabilistic treatment
for the measurement of cross traffic in the 1-hop case.
We derive inversion formulae for the cross traffic
process, and explain their fundamental limits, using an
intuitive geometric framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active probing; cross-traffic estimation",
}
@Article{Stutzbach:2005:CTT,
author = "Daniel Stutzbach and Reza Rejaie",
title = "Characterizing the two-tier {Gnutella} topology",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "402--403",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064275",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Characterizing the properties of peer-to-peer (P2P)
overlay topologies in file-sharing applications is
essential for understanding their impact on the
network, identifying their performance bottlenecks in
practice, and evaluating their performance via
simulation. Such characterization requires accurate
snapshots of the overlay topology which is difficult to
capture due to the large size and dynamic nature.
Previous studies characterizing overlay topologies not
only are outdated but also rely on partial or
potentially distorted snapshots. In this extended
abstract, we briefly present the first characterization
of two-tier Gnutella topologies based on recent and
accurate snapshots.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Gnutella; peer-to-peer; topology",
}
@Article{Tewari:2005:ASR,
author = "Saurabh Tewari and Leonard Kleinrock",
title = "Analysis of search and replication in unstructured
peer-to-peer networks",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "404--405",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064276",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates the effect of the number of
file replicas on search performance in unstructured
peer-to-peer networks. We observe that for a search
network with a random graph topology where file
replicas are uniformly distributed, the hop distance to
a replica of a file is logarithmic in the number of
replicas. Using this observation we show that
flooding-based search is optimized when the number of
replicas is proportional to the file request rates.
This replica distribution is also optimal for download
time and since flooding has logarithmically better
search time than random walk under its optimal replica
distribution, we investigate the query-processing load
using this distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flooding; optimal file replication; peer-to-peer;
random graphs; replication; search performance;
unstructured networks",
}
@Article{Zhang:2005:ILS,
author = "Jianyong Zhang and Anand Sivasubramaniam and Alma
Riska and Qian Wang and Erik Riedel",
title = "An interposed 2-Level {I/O} scheduling framework for
performance virtualization",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "406--407",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064277",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; I/O scheduling; performance isolation;
quality of service; storage systems; virtualization",
}
@Article{Wenisch:2005:TAM,
author = "Thomas F. Wenisch and Roland E. Wunderlich and Babak
Falsafi and James C. Hoe",
title = "{TurboSMARTS}: accurate microarchitecture simulation
sampling in minutes",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "408--409",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064278",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent research proposes accelerating processor
microarchitecture simulation through statistical
sampling. Prior simulation sampling approaches
construct accurate model state for each measurement by
continuously warming large microarchitectural
structures (e.g., caches and the branch predictor)
while emulating the billions of instructions between
measurements. This approach, called functional warming,
occupies hours of runtime while the detailed simulation
that is measured requires mere minutes. To eliminate
the functional warming bottleneck, we propose
TurboSMARTS, a simulation framework that stores
functionally-warmed state in a library of small,
reusable checkpoints. TurboSMARTS enables the creation
of the thousands of checkpoints necessary for accurate
sampling by storing only the subset of warmed state
accessed during simulation of each brief execution
window. TurboSMARTS matches the accuracy of prior
simulation sampling techniques (i.e., $ \pm $3\% error
with 99.7\% confidence), while estimating the
performance of an 8-way out-of-order super-scalar
processor running SPEC CPU2000 in 91 seconds per
benchmark, on average, using a 12 GB checkpoint
library.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "checkpointed microarchitecture simulation; simulation
sampling",
}
@Article{Hu:2005:RCM,
author = "Chunyu Hu and Jennifer C. Hou",
title = "A reactive channel model for expediting wireless
network simulation",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "410--411",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064279",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major problem with leveraging event-driven,
packet-level simulation environments, such as {\em
ns2\/} [6], {\em J-Sim\/} [1], {\em OpNet\/} [2], and
{\em QualNet\/} [3], in conducting wireless network
simulation is the vast number of events generated, a
majority of which are related to signal transmission in
the PHY/MAC layers. In this extended abstract, we
investigate the operations of signal transmission in
the various stages: {\em signal propagation}, {\em
signal interference}, and {\em interaction with the
PHY/MAC layers}, and identify where events can be
reduced without impairing the accuracy. We propose to
leverage the MAC/PHY state information, and devise
(from the perspective of network simulation) a reactive
channel model (RCM) in which nodes explicitly {\em
register\/} their interests in receiving certain events
according to the MAC/PHY states they are in and the
corresponding operations that should be performed. The
simulation study indicates that RCM renders an order of
magnitude of speed-up without compromising the accuracy
of simulation results. An advantage of RCM with respect
to the implementation is that there is no need to
re-design the channel model for each specific MAC
layer, and the modification made in the MAC/PHY layers
is quite modest (e.g., a few API changes). This,
coupled with the performance gain, suggests that RCM is
an attractive, light-weight mechanism for expediting
wireless network simulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel model; network simulation; reactive;
scalability",
}
@Article{Groenevelt:2005:MDM,
author = "Robin Groenevelt and Philippe Nain and Ger Koole",
title = "Message delay in {MANET}",
journal = j-SIGMETRICS,
volume = "33",
number = "1",
pages = "412--413",
month = jun,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1064212.1064280",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A generic stochastic model with only two input
parameters is introduced to evaluate the message delay
in mobile ad hoc networks (MANETs) where nodes may
relay messages. The Laplace--Stieltjes transform (LST)
of the message delay is obtained for two protocols: the
two-hop and the unrestricted multicopy protocol. From
these results we deduce the expected message delays. It
is shown that, despite its simplicity, the model
accurately predicts the message delay under both relay
strategies for a number of mobility models (the random
waypoint, random direction and the random walker
mobility models).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay; estimation; mobile ad hoc; modeling; networks;
performance prediction; statistics",
}
@Article{Squillante:2005:SIW,
author = "Mark S. Squillante",
title = "Special issue on the workshop on {MAthematical
performance Modeling And Analysis (MAMA 2005)}: {Guest
Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "2--2",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101893",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carofiglio:2005:SPA,
author = "Giovanna Carofiglio and Rossano Gaeta and Michele
Garetto and Paolo Giaccone and Emilio Leonardi and
Matteo Sereno",
title = "A statistical physics approach for modelling {P2P}
systems",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "3--5",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We apply basic concepts of statistical physics to
devise an approximate model describing the dynamics of
content diffusion in large peer-to-peer networks. Our
approach is based on fluid-diffusive equations, whose
solution can be obtained by numerical evaluation with a
complexity independent of the number of users and
contents, thus allowing to analyze very large systems.
The model is general and modular, and can incorporate
the effect of both search and download processes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sundararaj:2005:OPA,
author = "Ananth I. Sundararaj and Manan Sanghi and John R.
Lange and Peter A. Dinda",
title = "An optimization problem in adaptive virtual
environments",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "6--8",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101895",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A virtual execution environment consisting of virtual
machines (VMs) interconnected with virtual networks
provides opportunities to dynamically optimize, at
run-time, the performance of existing, {\em
unmodified\/} distributed applications without any user
or programmer intervention. Along with resource
monitoring and inference and application-independent
adaptation mechanisms, efficient adaptation algorithms
are key to the success of such an effort. In previous
work we have described our measurement and inference
framework, explained our adaptation mechanisms, and
proposed simple heuristics as adaptation algorithms.
Though we were successful in improving performance as
compared to the case with no adaptation, none of our
algorithms were characterized by theoretically proven
bounds. In this paper, we formalize the adaptation
problem, show that it is NP-hard and propose research
directions for coming up with an efficient solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nicol:2005:OPC,
author = "David M. Nicol",
title = "Optimized pre-copy calibration of hard drives",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "9--11",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In certain contexts a small window of time exists
during which law enforcement has access to a hard-drive
suspected of containing important information. Given
legal authority to copy or seize this disk, a decision
must be made whether to use that access time to make a
copy of the disk (which may take more than an hour,
depending on the size of the disk) and leave its owner
unaware that it has been copied. The copying operation
uses especially fast drivers that bypass normal error
correction mechanisms. Therefore, for the copy to be
successful it is necessary that the disk onto which the
copy is placed yield exactly the same bits on
subsequent reads as would the original disk. To gain
confidence that the copy will be successful the copying
software typically chooses some sectors at random,
copies them, and determines whether their copies are
identical to the original. We address the problem of
quantifying the conditional probability that the disk
will copy correctly given that some samples have copied
correctly, as a function of the number and placement
of those samples. Our framework allows us then to
choose the placement of those samples in such a way
that this conditional probability is maximized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kumaran:2005:SAC,
author = "J. Kumaran and K. Mitchell and A. van de Liefvoort",
title = "A spectral approach to compute performance measures in
a correlated single server queue",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "12--14",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101897",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The coupling matrix was introduced in [8] to compute
the performance measures of a PH/PH/1 single server
queue. This matrix was extended in [1, 2] to include
arrival and service processes that are possibly
serially correlated processes, although the service
process remains independent of the arrival process and
all marginal distributions are matrix exponential, and
this current paper is an extended abstract of [2]. The
coupling matrix is constructed from the arrival and the
service distributions without any computational effort,
and the performance measures (such as waiting times and
queue length distributions) are derived directly from
its spectrum.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fiorini:2005:UCS,
author = "Pierre M. Fiorini and Robert Sheahan and Lester
Lipsky",
title = "On unreliable computing systems when heavy-tails
appear as a result of the recovery procedure",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "15--17",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For some computing systems, failure is rare enough
that it can be ignored. In other systems, failure is so
common that how to handle it can have a significant
impact on the performance of the system. There are many
different recovery schemes for tasks, however, they can
be classified into three broad categories: (1) {\em
Resume\/}: when a task fails, it knows exactly where it
stops and can continue at that point when allowed to
resume (i.e., {\em preemptive resume --- (prs)\/}); (2)
{\em Replace\/}: when a task fails, then later when the
processor continues, it begins with a brand new task
(i.e., {\em preemptive repeat different (prd)\/}); and,
(3) {\em Restart\/}: when a task fails it loses all
work done to that point and must start anew upon
continuing later (i.e., {\em preemptive repeat
identical --- pri\/}). In this paper, assuming a
computing system is unreliable, we discuss how {\em
heavy-tail\/} (hereafter referred to as {\em
power-tail\/} --- PT) distributions can appear in a
job's task stream given the {\em Restart\/} recovery
procedure. This is an important consideration since it
is known that power-tails can lead to unstable systems
[4]. We then demonstrate how to obtain performance and
dependability measures for a class of computing systems
comprised of $P$ unreliable processors and a finite
number of tasks $N$ given the above recovery
procedures.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2005:MDP,
author = "Qi Zhang and Armin Heindl and Evgenia Smirni",
title = "Models of the departure process of a {BMAP/MAP/1}
queue",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "18--20",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a family of finite approximations for the
departure process of a BMAP/MAP/1 queue. The departure
process approximations are derived via an exact
aggregate solution technique (called ETAQA) applied to
M/G/1-type Markov processes. The proposed
approximations are indexed by a parameter $n$ ($ n \geq
1$), which determines the size of the output model as $
n + 1$ block levels of the M/G/1-type process. This
output approximation preserves exactly the marginal
distribution of the true departure process and the lag
correlations of the inter-departure times up to lag $ n
- 2$. Experimental results support the applicability of
the proposed approximation in traffic-based
decomposition of queueing networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramachandran:2005:PBA,
author = "Krishna K. Ramachandran and Biplab Sikdar",
title = "A population based approach to model network lifetime
in wireless sensor networks",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "21--23",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The physical constraints of battery-powered sensors
impose limitations on their processing capacity and
longevity. As battery power in the nodes decays,
certain parts of the network may become disconnected or
the coverage may shrink, thereby reducing the
reliability and the potency of the sensor network.
Since sensor networks operate unattended and without
maintenance, it is imperative that network failures are
detected early enough so that corrective measures can
be taken. Existing research has primarily concentrated
on developing algorithms, be it distributed or
centralized, to optimize network longevity metrics. For
instance, [4, 5] propose MAC layer optimizations to
prolong longevity, while [7, 6] look at the problem
from a Layer 3 perspective. Works along the lines of
actually building network models for energy consumption
are addressed in [2], [3], but these models fail to
capture the interplay between a node's spatial location
and its energy consumption. In our current work, we
develop a unifying framework to characterize the
lifetime of such energy constrained networks, and
obtain insights into their working. In particular, we
employ a framework similar to population models for
biological systems, to model the network lifetime. We
consider both {\em spatial\/} scenarios, where a node's
power consumption is governed by its position in space
as well as {\em nonspatial\/} scenarios, where the
node's location and power consumption model are
independent entities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kamra:2005:DPS,
author = "Abhinav Kamra and Jon Feldman and Vishal Misra and Dan
Rubenstein",
title = "Data persistence in sensor networks: towards optimal
encoding for data recovery in partial network
failures",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "24--26",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101901",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sensor networks consist of a number of sensors spread
across a geographical area. Each sensor has
communication capability and some level of intelligence
for signal processing and networking of data. Each
sensor node in the network routinely `senses' and
stores data from its immediate environment. An
important requirement of the sensor network is that the
collected data be disseminated to the proper end users.
In some cases, there are fairly strict requirements on
this communication. For example, the detection of an
intruder in a surveillance network should be
immediately communicated to the police authorities.
Each sensor node also has some storage capacity to
store the collected data or to assemble the data prior
to communicating it to another node.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2005:ION,
author = "Wenjie Jiang and John C. S. Lui and Dah-Ming Chiu",
title = "Interaction of overlay networks: properties and
implications",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "27--29",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although the concept of application layer overlay
routing has received much attention lately, there has
been little focus on the {\em `coexistence'\/} and {\em
`interaction'\/} of overlays on top of the same
physical network. In this paper, we show that when each
overlay plays the optimal routing strategy so as to
optimize its own performance, there exists an
equilibrium point for the overall routing strategy.
However, the equilibrium may be {\em inefficient:\/}
(a) it may not be Pareto optimal, (b) some fairness
anomalies of resource allocation may occur. This is
worthy of attention since overlays can be easily
deployed and overlays may not know the existence of
each other, they may continue to operate at a
sub-optimal point.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:2005:CNC,
author = "Richard T. B. Ma and Vishal Misra and Dan Rubenstein",
title = "Cooperative and non-cooperative models for
slotted-{Aloha} type {MAC} protocols",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "30--32",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Aloha [1] and its slotted variation [3] have been
widely deployed as a medium access control (MAC)
protocol for different communication networks.
Slotted-Aloha type MAC protocols don't perform carrier
sensing and synchronize the transmissions into
time-slots. These protocols are suitable for
controlling multiple accesses when nodes cannot sense
each other. Recent development of wireless and sensor
networks urges us to re-investigate slotted-Aloha type
MAC, and to design its variations for these new
trends.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Covell:2005:PMS,
author = "Michele Covell and Sumit Roy and Beomjoo Seo",
title = "Predictive modeling of streaming servers",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "33--35",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe our approach to deriving
saturation models for streaming servers from
vector-labeled training data. If a streaming server is
driven into saturation by accepting too many clients,
the quality of service degrades across the sessions.
The actual saturating load on a streaming server
depends on the detailed characteristics of the client
requests: the content location (local disk or stream
relay), the relative popularity, and the bit and packet
rates [1]. Previous work in streaming-server models has
used carefully selected, low-dimensional measurements,
such as client jitter and rebuffering counts [2], or
server memory usage [3]. In contrast, we collect 30
distinct low-level measures and 210 nonlinear
derivative measures each second. This provides us with
robustness against outliers, without reducing
sensitivity or responsiveness to changes in load. Since
the measurement dimensionality is so high, our approach
requires the modeling and learning framework described
in this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2005:RTP,
author = "Mor Harchol-Balter and Takayuki Osogami and Alan
Scheller-Wolf",
title = "Robustness of threshold policies in beneficiary-donor
model",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "36--38",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101905",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common problem in multiserver systems is deciding
how to allocate resources among jobs so as to minimize
mean response time. Since good parameter settings
typically depend on environmental conditions such as
system loads, an allocation policy that is optimal in
one environment may provide poor performance when the
environment changes, or when the prediction of the
environment is wrong. We say that such a policy is not
{\em robust.\/} In this paper, we analytically compare
the robustness of several threshold-based allocation
policies, in a dual server beneficiary-donor model. We
introduce two types of robustness: {\em static
robustness}, which measures robustness against
mis-estimation of the true load, and {\em dynamic
robustness}, which measures robustness against
fluctuations in the load. We find that policies
employing multiple thresholds offer significant benefit
over single threshold policies with respect to static
robustness. Yet they surprisingly offer much less
benefit with respect to dynamic robustness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raz:2005:LRU,
author = "David Raz and Benjamin Avi-Itzhak and Hanoch Levy",
title = "Locality of reference and the use of sojourn time
variance for measuring queue unfairness: extended
abstract",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "39--41",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The variance of customer sojourn time (or waiting
time) is used, either explicitly or implicitly, as an
indication of fairness for as long as queueing theory
exists. In this work we demonstrate that this quantity
has a disadvantage as a fairness measure, since it is
not local to the busy period in which it is measured.
It therefore may account for customer discrepancies
which are not relevant to fairness of scheduling. We
show that RAQFM, a recently proposed job fairness
measure, does possess such a locality property. We
further show that within a large class of fairness
measures RAQFM is unique in possessing this property.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2005:DSO,
author = "Yingdong Lu and Mark S. Squillante",
title = "Dynamic scheduling to optimize utility functions of
sojourn time moments in queueing systems",
journal = j-SIGMETRICS,
volume = "33",
number = "2",
pages = "42--44",
month = sep,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1101892.1101907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:33 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is well known that scheduling the service of
customers according to the shortest remaining
processing time (SRPT) policy is optimal with respect
to minimizing the mean sojourn time of customers.
Recent studies have further argued that SRPT does not
unfairly penalize large customers in order to benefit
small customers, and therefore these studies propose
the use of SRPT to improve performance in various
applications. However, as Schrage and Miller point out
[10], the SRPT policy can raise several difficulties
for a number of important reasons. Such difficulties
can arise from the inability to accurately predict
service times, or the complicated nature of
implementing the preemptive aspect of the SRPT policy
which requires keeping track of the remaining service
times of all waiting customers as well as of the
customer in service. Normally, preemption also incurs
additional costs, and thus one might want to avoid the
preemption of customers in service whose remaining
service time is not much larger than that of a new
arrival.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papagiannaki:2005:GEF,
author = "Konstantina Papagiannaki and Yin Zhang",
title = "{Guest Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "2--2",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, large scale network inference has
attracted significant interest within the research
community. On one front, considerable progress has been
made on traffic matrix estimation. Solutions have been
proposed to estimate the amount of traffic flowing
between any pair of ingress and egress points within an
IP network simply based on the total amount of traffic
recorded over IP links. On another front, efforts are
being made to detect the state of the network from end
to end measurements using inference techniques or to
infer the traffic workload by exploiting application
behavior. In essence, the full instrumentation of the
state of an IP network is still considered a cost
prohibitive task and inference may be the only tool we
have to understand the behavior of such large scale
systems. The potential benefits of the proposed
estimation techniques can be great. Accurate
measurement of an IP traffic matrix is essential for
network design and planning. Moreover, accurate
estimation of the network state can facilitate
troubleshooting and performance evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chandramouli:2005:ANC,
author = "Y. Chandramouli and Arnold Neidhardt",
title = "Analysis of network congestion inference techniques",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "3--9",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a neutral observer monitoring network
performance based on external network measurements.
Whenever congestion symptoms are observed within the
network, the neutral observer would be interested in
diagnosing the cause of the symptom, and in particular,
identifying the congested link within the network. The
neutral observer may contemplate to collect external
network measurements reflective of network performance
and from those measurements infer link delays to
identify the congested link. Given the measurements
collected, the following result has been obtained in
this article. We prove that it is not possible to
determine one-way link delays based on external network
delay measurements. It is important to note that it is
possible to determine one-way link delays with more
information such as historical data or additional
assumptions about directional delays.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burch:2005:MLD,
author = "Hal Burch and Chris Chase",
title = "Monitoring link delays with one measurement host",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "10--17",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111578",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present RCM, a system to monitor link delays on a
network using a single measurement host. RCM is a
combination of a new measurement system and a new
network tomography technique. The measurement system
employs tunnels to connect to border routers where it
can source and sink measurements across the network.
RCM uses network tomography to calculate the delays
across individual network links from these
measurements. The network tomography technique expands
on previous linear algebra techniques to deal with the
limitations of the resulting data without assuming
either link delay symmetry or a particular topology.
The network tomographic technique is compared against
direct measurements in simulation to ensure accuracy.
RCM is deployed on a large ISP's network to diagnose
the cause of end-to-end delays, from which additional
results are presented. The results are compared against
known behaviors of the network to ensure the results
are consistent with those behaviors. The system is
analyzed for its ability to pin-point the cause of
changes in end-to-end delay.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2005:OCS,
author = "Baek-Young Choi and Supratik Bhattacharyya",
title = "Observations on {Cisco} sampled {NetFlow}",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "18--23",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111579",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic monitoring is an important first step for
network management and traffic engineering. With
high-speed Internet backbone links, efficient and
effective packet sampling is not only desirable, but
also increasingly becoming a necessity. The Sampled
NetFlow [10] is Cisco router's traffic measurement
functionality with static packet sampling for high
speed links. Since the utility of sampling depends on
the {\em accuracy\/} and {\em economy\/} of
measurement, it is important to understand sampling
error and measurement overhead. In this paper, we first
discuss fundamental limitations of sampling techniques
used in the Sampled NetFlow. We assess the accuracy of
the Sampled NetFlow by comparing its output with
complete packet traces [8] from an operational router.
We also show the overheads involved in the Sampled
NetFlow. We find that Sampled NetFlow performs
correctly without incurring dramatic overhead during
our experiments. However, care should be taken in its
use, since the overhead is linearly proportional to the
number of flow records.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Soule:2005:TMT,
author = "Augustin Soule and Kav{\'e} Salamatian and Antonio
Nucci and Nina Taft",
title = "Traffic matrix tracking using {Kalman} filters",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "24--31",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we develop a new approach to monitoring
origin-destination flows in a large network. We start
by building a state space model for OD flows that is
rich enough to fully capture temporal and spatial
correlations. We apply a Kalman filter to our linear
dynamic system that can be used for both estimation and
prediction of traffic matrices. We call our system a
traffic matrix tracker due to its lightweight mechanism
for temporal updates that enables tracking traffic
matrix dynamics at small time scales. Our Kalman filter
approach allows us to go beyond traffic matrix
estimation in that our single system can also carry out
traffic prediction and yield confidence bounds on the
estimates, the predictions and the residual error
processes. We show that these elements provide key
functionalities needed by monitoring systems of the
future for carrying out anomaly detection. Using real
data collected from a Tier-1 ISP, we validate our
model, illustrate that it can achieve low errors, and
that our method is adaptive on both short and long
timescales.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lance:2005:RTT,
author = "Ryan Lance and Ian Frommer",
title = "Round-trip time inference via passive monitoring",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "32--38",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111581",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The round-trip time and congestion window are the most
important rate-controlling variables in TCP. We present
a novel method for estimating these variables from
passive traffic measurements. The method uses four
different techniques to infer the minimum round-trip
time based on the pacing of a limited number of packets.
We then estimate the sequence of congestion windows and
round-trip times for the whole flow. We validate our
algorithms with the ns2 network simulator.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lawrence:2005:LAN,
author = "Earl Lawrence and George Michailidis and Vijay N.
Nair",
title = "Local area network analysis using end-to-end delay
tomography",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "39--45",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111582",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been considerable interest over the last few
years in collecting and analyzing Internet traffic data
in order to estimate quality of service parameters such
as packet loss rates and delay distributions. In this
paper, we focus on fast and efficient estimation
methods for network link delay distributions based on
end-to-end measurements obtained by probing the
underlying network. We introduce a rigorous statistical
framework for designing the necessary probing
experiments and examine the properties of the proposed
estimators. The proposed framework and the resulting
methodology are validated using data collected on the
University of North Carolina (UNC) local area
network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tian:2005:TAL,
author = "Wenhong Tian",
title = "The transient analysis of loss networks",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "46--50",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Unlike stationary behavior, time-dependent blocking
probabilities for loss networks are not well understood
and little work has been done except for the single
service center case. We propose novel closed-form
transient analysis methods for single Erlang loss
system and networks; to the best of our knowledge,
these are the most efficient ways to analyze the
transient behavior of Erlang loss system and networks.
Applying this model, time-dependent provisioning can
satisfy dynamically changed traffic demands and avoid
overprovisioning problem in connection-oriented loss
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fiedler:2005:TMT,
author = "Daniel Fiedler and Kristen Walcott and Thomas
Richardson and Gregory M. Kapfhammer and Ahmed Amer and
Panos K. Chrysanthis",
title = "Towards the measurement of tuple space performance",
journal = j-SIGMETRICS,
volume = "33",
number = "3",
pages = "51--62",
month = dec,
year = "2005",
CODEN = "????",
DOI = "https://doi.org/10.1145/1111572.1111574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:35 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many applications rely upon a tuple space within
distributed system middleware to provide loosely
coupled communication and service coordination. This
paper describes an approach for measuring the
throughput and response time of a tuple space when it
handles concurrent local space interactions.
Furthermore, it discusses a technique that populates a
tuple space with tuples before the execution of a
benchmark in order to age the tuple space and provide a
worst-case measurement of space performance. We apply
the tuple space benchmarking and aging methods to the
measurement of the performance of a JavaSpace, a
current example of a tuple space that integrates with
the Jini network technology. The experiment results
indicate that: (i) the JavaSpace exhibits limited
scalability as the number of concurrent interactions
from local space clients increases, (ii) the aging
technique can operate with acceptable time overhead,
and (iii) the aging technique does ensure that the
results from benchmarking capture the worst-case
performance of a tuple space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2006:GEF,
author = "Alma Riska and Erik Riedel",
title = "{Guest Editor}'s foreword: bigger and faster and
smaller",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "2--3",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years storage systems have evolved
dramatically as a result of both social and technical
advances. Storage systems and storage devices are found
in almost any computing installation from large
centralized and distributed enterprise systems to a
variety of mobile consumer electronic devices. Such a
wide deployment of storage has created a need to
re-evaluate basic solutions in storage systems design
and implementation. As part of this ongoing process of
technology evolution, it is critical to find a
framework to identify, understand, and evaluate a range
of issues. The reliability, availability, scalability,
performance, and power consumption characteristics of
storage systems must be considered in a variety of
traditional and emerging computing environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keeton:2006:CMD,
author = "Kimberly Keeton and Arif Merchant",
title = "Challenges in managing dependable data systems",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "4--10",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent work shows how to automatically design storage
systems that meet performance and dependability
requirements by appropriately selecting and configuring
storage devices, and creating snapshot, remote mirror,
and traditional backup copies. Although this work
represents a solid foundation, users demand an even
higher level of functionality: the ability to
cost-effectively manage data according to
application-centric (or better, business
process-centric) performance, dependability and
manageability requirements, as these requirements
evolve over the data's lifetime. In this paper, we
outline several research challenges in managing
dependable data systems, including capturing users'
high-level goals; translating them into storage-level
requirements; and designing, deploying, and analyzing
the resulting data systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2006:ACT,
author = "Jianyong Zhang and Prasenjit Sarkar and Anand
Sivasubramaniam",
title = "Achieving completion time guarantees in an
opportunistic data migration scheme",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "11--16",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138090",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today's data centers are in a constant state of
evolution because of equipment refreshes and the move
to tiered storage. Data migration is a very important
activity in this environment as data moves from one
storage device to another without disrupting access
from applications. This paper presents the design,
implementation, and evaluation of a migration scheme
that provides completion time guarantees for a
migration task and also minimizes its impact on
foreground applications. This scheme is based on an
opportunistic data migration scheme that considers
migration as background activities. To make sure that a
migration task obeys a completion time constraint, an
adaptive rate control mechanism is presented. The
scheme uses various statistical techniques to estimate
system capacities, and utilize these estimates to
regulate foreground activities. Trace-driven
experimental evaluation shows that our migration scheme
is able to ensure that the migration task completes in
time while minimizing the impact on foreground
application activity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thomasian:2006:MLR,
author = "Alexander Thomasian",
title = "Multi-level {RAID} for very large disk arrays",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "17--22",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138091",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Very Large Disk Arrays --- VLDAs have been developed
to cope with the rapid increase in the volume of data
generated requiring ultrareliable storage. Bricks or
Storage Nodes --- SNs holding a dozen or more disks are
cost effective VLDA building blocks, since they cost
less than traditional disk arrays. We utilize the
Multilevel RAID --- MRAID paradigm for protecting both
SNs and their disks. Each SN is a
$k$-disk-failure-tolerant kDFT array, while replication
or $l$-node failure tolerance --- $l$ NFTs paradigm is
applied at the SN level. For example, RAID1(M)/5(N)
denotes a RAID1 at the higher level with a degree of
replication $M$ and each virtual disk is an SN
configured as a RAID5 with $N$ physical disks. We
provide the data layout for RAID5/5 and RAID6/5 MRAIDs
and give examples of updating data and recovering lost
data. The former requires {\em storage transactions\/}
to ensure the atomicity of storage updates. We discuss
some weaknesses in reliability modeling in RAID5 and
give examples of an asymptotic expansion method to
compare the reliability of several MRAID organizations.
We outline the reliability analysis of Markov chain
models of VLDAs and briefly report on conclusions from
simulation results. In Conclusions we outline areas for
further research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mesnier:2006:RFM,
author = "Michael Mesnier and Matthew Wachs and Brandon Salmon
and Gregory R. Ganger",
title = "Relative fitness models for storage",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "23--28",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Relative fitness is a new black-box approach to
modeling storage devices. Whereas conventional
black-box models train to predict a device's
performance given `device-independent' workload
characteristics, relative fitness models learn to
predict the {\em changes\/} in performance between
specific devices. There are two advantages. First,
unlike conventional modeling, relative fitness does not
depend entirely on workload characteristics;
performance and resource utilization (e.g., cache
usage) can also be used to describe a workload. This is
beneficial when workload characteristics are difficult
to express (e.g., temporal locality). Second, because
relative fitness models are constructed for each pair
of devices, changes in workload characteristics (e.g.,
I/O inter-arrival delay) can be modeled. Therefore,
unlike a conventional model, a relative fitness model
can be used by applications with a {\em closed\/} I/O
arrival process. In this article, we present relative
fitness as an evolution of the conventional model and
share some early results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arpaci-Dusseau:2006:SSD,
author = "Andrea C. Arpaci-Dusseau and Remzi H. Arpaci-Dusseau
and Lakshmi N. Bairavasundaram and Timothy E. Denehy
and Florentina I. Popovici and Vijayan Prabhakaran and
Muthian Sivathanu",
title = "Semantically-smart disk systems: past, present, and
future",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "29--35",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138093",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we describe research that has been
on-going within our group for the past four years on
{\em semantically-smart disk systems}. A
semantically-smart system goes beyond typical
block-based storage systems by extracting higher-level
information from the stream of traffic to disk; doing
so enables new and interesting pieces of functionality
to be implemented within low-level storage systems. We
first describe the development of our efforts over the
past four years, highlighting the key technologies
needed to build semantically-smart systems as well as
the main weaknesses of our approach. We then discuss
future directions in the design and implementation of
smarter storage systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2006:BDS,
author = "Eitan Bachmat and Vladimir Braverman",
title = "Batched disk scheduling with delays",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "36--41",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "One of the important performance enhancing
capabilities of modern disk drives, is the ability to
permute the order of service of incoming I/O requests
in order to minimize total access time. Given a batch
(set) of I/O requests, the problem of finding the
optimal order of service is known as the {\em Batched
Disk Scheduling Problem\/} (BDSP). BDSP is a well known
instance of the Asymmetric Traveling Salesman Problem
(ATSP), in fact it has been used as one of a few
principal test cases for the examination of heuristic
algorithms for the ATSP, [4], [12]. To specify an
instance of BDSP amounts to a choice of a model for the
mechanical motion of the disk and a choice of locations
and lengths of the requested I/O in the batch. The
distance between requests is the amount of time needed
by the disk to move from the end of one request to the
beginning of the other, thus the amount of time needed
to read the data itself, {\em Transfer time}, is not
counted since it is independent of the order of the
requests, only the order dependent {\em Access time\/}
is computed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zarandioon:2006:OOD,
author = "Saman Zarandioon and Alexander Thomasian",
title = "Optimization of online disk scheduling algorithms",
journal = j-SIGMETRICS,
volume = "33",
number = "4",
pages = "42--46",
month = mar,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1138085.1138086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:36 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Disk scheduling policies have a significant effect on
disk performance. SPTF is one of the well-known
policies that can increase disk performance near to
optimality [1]. One of the drawbacks of the regular
implementation of SPTF is its high computational cost.
`The computational cost [of SPTF] (as indicated crudely
by our simulation times) is very high' [2]. This paper
shows that computational cost of SPTF is not the
characteristic of SPTF, but it is a matter of
implementation. The experience shows that this approach
can improve the efficiency over 80\% compared to
na{\"\i}ve implementation. Finally, an algorithm for
efficient implementation of lookahead algorithms is
introduced.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk scheduling; online scheduling; shortest
positioning time first; SPTF",
}
@Article{Reed:2006:PRU,
author = "Daniel A. Reed",
title = "Performance and reliability: the ubiquitous
challenge",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "1--2",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140278",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Legend says that Archimedes remarked, on the discovery
of the lever, `Give me a place to stand and I can move
the world.' Today, computing pervades all aspects of
society. `Science' and `computational science' have
become largely synonymous, and computing is the
intellectual lever that opens the pathway to discovery
in diverse domains. As new discoveries increasingly lie
at the interstices of traditional disciplines,
computing is also the enabler for scholarship in the
arts, humanities, creative practice and public policy.
Equally importantly, computing supports our critical
infrastructure, from monetary and communication systems
to the electric power grid. With such pervasive
dependence, computing system reliability and
performance are ever more critical. Although the mean
time before failure (MTBF) of commodity hardware
components (i.e., processors, disks, memories, power
supplies and networks) is high, their use in large,
mission critical systems can still lead to systemic
failures. Our thesis is that the `two worlds' of
software --- distributed systems and
sequential/parallel systems --- must meet, embodying
ideas from each, if we are to build resilient systems.
This talk surveys some of these challenges and presents
possible approaches for resilient design, ranging from
intelligent hardware monitoring and adaptation, through
low-overhead recovery schemes, statistical sampling and
differential scheduling and to alternative models of
system software, including evolutionary adaptation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Thereska:2006:STA,
author = "Eno Thereska and Brandon Salmon and John Strunk and
Matthew Wachs and Michael Abd-El-Malek and Julio Lopez
and Gregory R. Ganger",
title = "{Stardust}: tracking activity in a distributed storage
system",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "3--14",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140280",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance monitoring in most distributed systems
provides minimal guidance for tuning, problem
diagnosis, and decision making. Stardust is a
monitoring infrastructure that replaces traditional
performance counters with end-to-end traces of requests
and allows for efficient querying of performance
metrics. Such traces better inform key administrative
performance challenges by enabling, for example,
extraction of per-workload, per-resource demand
information and per-workload latency graphs. This paper
reports on our experience building and using end-to-end
tracing as an on-line monitoring tool in a distributed
storage system. Using diverse system workloads and
scenarios, we show that such fine-grained tracing can
be made efficient (less than 6\% overhead) and is
useful for on- and off-line analysis of system
behavior. These experiences make a case for having
other systems incorporate such an instrumentation
framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "end-to-end tracing; request causal chain; Ursa Minor",
}
@Article{Pinheiro:2006:ERC,
author = "Eduardo Pinheiro and Ricardo Bianchini and Cezary
Dubnicki",
title = "Exploiting redundancy to conserve energy in storage
systems",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "15--26",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140281",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper makes two main contributions. First, it
introduces Diverted Accesses, a technique that
leverages the redundancy in storage systems to conserve
disk energy. Second, it evaluates the previous
(redundancy-oblivious) energy conservation techniques,
along with Diverted Accesses, as a function of the
amount and type of redundancy in the system. The
evaluation is based on novel analytic models of the
energy consumed by the techniques. Using these energy
models and previous models of reliability,
availability, and performance, we can determine the
best redundancy configuration for new energy-aware
storage systems. To study Diverted Accesses for
realistic systems and workloads, we simulate a
wide-area storage system under two file-access traces.
Our modeling results show that Diverted Accesses is
more effective and robust than the redundancy-oblivious
techniques. Our simulation results show that our
technique can conserve 20--61\% of the disk energy
consumed by the wide-area storage system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk energy; energy management; energy modeling",
}
@Article{Modiano:2006:MTW,
author = "Eytan Modiano and Devavrat Shah and Gil Zussman",
title = "Maximizing throughput in wireless networks via
gossiping",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "27--38",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140283",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A major challenge in the design of wireless networks
is the need for distributed scheduling algorithms that
will efficiently share the common spectrum. Recently, a
few distributed algorithms for networks in which a node
can converse with at most a single neighbor at a time
have been presented. These algorithms guarantee 50\% of
the maximum possible throughput. We present the {\em
first distributed scheduling framework that guarantees
maximum throughput}. It is based on a combination of a
distributed matching algorithm and an algorithm that
compares and merges successive matching solutions. The
comparison can be done by a deterministic algorithm or
by randomized gossip algorithms. In the latter case,
the comparison may be inaccurate. Yet, we show that if
the matching and gossip algorithms satisfy simple
conditions related to their performance and to the
inaccuracy of the comparison (respectively), the
framework attains the desired throughput. It is shown
that the complexities of our algorithms, that achieve
nearly 100\% throughput, are comparable to those of the
algorithms that achieve 50\% throughput. Finally, we
discuss extensions to general interference models. Even
for such models, the framework provides a simple
distributed throughput optimal algorithm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithms; gossip algorithms; matching;
scheduling; stability; wireless networks",
}
@Article{Gao:2006:DEE,
author = "Yan Gao and Dah-Ming Chiu and John C. S. Lui",
title = "Determining the end-to-end throughput capacity in
multi-hop networks: methodology and applications",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "39--50",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140284",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a methodology to
analytically compute the {\em throughput capacity}, or
the maximum end-to-end throughput of a given source and
destination pair in a multi-hop wireless network. The
end-to-end throughput capacity is computed by
considering the interference due to neighboring nodes,
as well as various modes of hidden node interference.
Knowing the throughput capacity is important because it
facilitates the design of routing policy, admission
control for realtime traffic, as well as load control
for wireless networks. We model location-dependent
neighboring interference and we use a contention graph
to represent these interference relationships. Based on
the contention graph, we formulate the individual link
capacity as a set of fixed point equations. The
end-to-end throughput capacity can then be determined
once these link capacities are obtained. To illustrate
the utility of our proposed methodology, we present two
important applications: (a) {\em route optimization\/}
to determine the path with the maximum end-to-end
throughput capacity and, (b) {\em optimal offered load
control\/} for a given path so that the maximum
end-to-end capacity can be achieved. Extensive
simulations are carried out to verify and validate the
proposed analytical methodology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical model for 802.11 protocols; multi-hop ad
hoc wireless networks; throughput capacity",
}
@Article{Koksal:2006:ICV,
author = "Can Emre Koksal and Kyle Jamieson and Emre Telatar and
Patrick Thiran",
title = "Impacts of channel variability on link-level
throughput in wireless networks",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "51--62",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140285",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study analytically and experimentally the
throughput of the packetized time-varying discrete
erasure channel with feedback, which closely captures
the behavior of many practical physical layers. We
observe that the channel variability at different time
scales affects the link-level throughput positively or
negatively depending on its time scale. We show that
the increased variability in the channel at a time
scale smaller than a single packet increases the
link-level throughput, whereas the variability at a
time scale longer than a single packet reduces it. We
express the throughput as a function of the number of
transmissions per packet and evaluate it in terms of
the cumulants of the samples of the stochastic
processes, which model the channel. We also illustrate
our results experimentally using mote radios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel modelling; channel variability; link
estimation",
}
@Article{Mishra:2006:POC,
author = "Arunesh Mishra and Vivek Shrivastava and Suman
Banerjee and William Arbaugh",
title = "Partially overlapped channels not considered harmful",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "63--74",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140286",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many wireless channels in different technologies are
known to have partial overlap. However, due to the
interference effects among such partially overlapped
channels, their simultaneous use has typically been
avoided. In this paper, we present a first attempt to
model partial overlap between channels in a systematic
manner. Through the model, we illustrate that the use
of partially overlapped channels is not always harmful.
In fact, a careful use of some partially overlapped
channels can often lead to significant improvements in
spectrum utilization and application performance. We
demonstrate this through analysis as well as through
detailed application-level and MAC-level measurements.
Additionally, we illustrate the benefits of our
developed model by using it to directly enhance the
performance of two previously proposed channel
assignment algorithms --- one in the context of
wireless LANs and the other in the context of multi-hop
wireless mesh networks. Through detailed simulations,
we show that use of partially overlapped channels in
both these cases can improve end-to-end application
throughput by factors between 1.6 and 2.7 in different
scenarios, depending on wireless node density. We
conclude by observing that the notion of partial
overlap can be the right model of flexibility to design
efficient channel access mechanisms in the emerging
software radio platforms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel assignment; IEEE 802.11; partially overlapped
channels",
}
@Article{Lieshout:2006:GSS,
author = "P. Lieshout and M. Mandjes and S. Borst",
title = "{GPS} scheduling: selection of optimal weights and
comparison with strict priorities",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "75--86",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140288",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system with two service classes with
heterogeneous traffic characteristics and
Quality-of-Service requirements. The available
bandwidth is shared between the two traffic classes in
accordance with the Generalized Processor Sharing (GPS)
discipline. GPS-based scheduling algorithms, such as
Weighted Fair Queueing, provide a popular mechanism for
service differentiation among heterogeneous traffic
classes. While the performance of GPS for given weights
has been thoroughly examined, the problem of selecting
weight values that maximize the traffic-carrying
capacity, has only received limited attention so far.
In the present paper, we address the latter problem for
the case of general Gaussian traffic sources. Gaussian
models cover a wide variety of both long-range
dependent and short-range dependent processes, and are
especially suitable at relatively high levels of
aggregation. In particular, we determine the realizable
region, i.e., the combinations of traffic sources that
can be supported for given Quality-of-Service
requirements in terms of loss and delay metrics. The
results yield the remarkable observation that simple
priority scheduling strategies achieve nearly the full
realizable region.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admissible region; Gaussian traffic; generalized
processor sharing; loss probabilities; priority
scheduling; weight setting",
}
@Article{Gromoll:2006:IRP,
author = "H. Christian Gromoll and Philippe Robert and Bert
Zwart and Richard Bakker",
title = "The impact of reneging in processor sharing queues",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "87--96",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140289",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate an overloaded processor sharing queue
with renewal arrivals and generally distributed service
times. Impatient customers may abandon the queue, or
renege, before completing service. The random time
representing a customer's patience has a general
distribution and may be dependent on his initial
service time requirement. We propose a scaling
procedure that gives rise to a fluid model, with
nontrivial yet tractable steady state behavior. This
fluid model captures many essential features of the
underlying stochastic model, and we use it to analyze
the impact of impatience in processor sharing queues.
We show that this impact can be substantial compared
with FCFS, and we propose a simple admission control
policy to overcome these negative impacts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; delay-differential equation; fluid
limits; measure valued process; processor sharing;
queues in overload; queues with impatience; user
behavior",
}
@Article{Yang:2006:TAP,
author = "Chang-Woo Yang and Adam Wierman and Sanjay Shakkottai
and Mor Harchol-Balter",
title = "Tail asymptotics for policies favoring short jobs in a
many-flows regime",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "97--108",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140290",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling policies that prioritize short jobs have
received growing attention in recent years. The class
of SMART policies includes many such disciplines, e.g.
Shortest-Remaining-Processing-Time (SRPT) and
Preemptive-Shortest-Job-First (PSJF). In this work, we
study the delay distribution of SMART policies and
contrast this distribution with that of the
Least-Attained-Service (LAS) policy, which indirectly
favors short jobs by prioritizing jobs with the least
attained service (age). We study the delay distribution
(rate function) of LAS and the SMART class in a
discrete-time queueing system under the many sources
regime. Our analysis in this regime (large capacity and
large number of flows) hinges on a novel two
dimensional queue representation, which creates
tie-break rules. These additional rules do not alter
the policies, but greatly simplify their analysis. We
demonstrate that the queue evolution of all the above
policies can be described under this single two
dimensional framework. We prove that all SMART policies
have the same delay distribution as SRPT and illustrate
the improvements SMART policies make over
First-Come-First-Served (FCFS). Furthermore, we show
that the delay distribution of SMART policies
stochastically improves upon the delay distribution of
LAS. However, the delay distribution under LAS is not
too bad --- the distribution of delay under LAS for
most jobs sizes still provides improvement over FCFS.
Our results are complementary to prior work that
studies delay-tail behavior in the large buffer regime
under a single flow.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "large-deviations; LAS; many-sources; rate function;
scheduling; SMART; SRPT",
}
@Article{Bonald:2006:LHT,
author = "Thomas Bonald and Aleksi Penttinen and Jorma Virtamo",
title = "On light and heavy traffic approximations of balanced
fairness",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "109--120",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140291",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Flow level analysis of communication networks with
multiple shared resources is generally difficult. A
recently introduced sharing scheme called balanced
fairness has brought these systems within the realm of
tractability. While straightforward in principle, the
numerical evaluation of practically interesting
performance metrics like per-flow throughput is
feasible for limited state spaces only, besides some
specific networks where the results are explicit. In
the present paper, we study the behaviour of balanced
fairness in light and heavy traffic regimes and show
how the corresponding performance results can be used
to approximate the flow throughput over the whole load
range. The results apply to any network, with a state
space of arbitrary dimension. A few examples are
explicitly worked out to illustrate the concepts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "balanced fairness; elastic traffic; flow level
analysis; throughput approximation",
}
@Article{Song:2006:NFF,
author = "Han Hee Song and Lili Qiu and Yin Zhang",
title = "{NetQuest}: a flexible framework for large-scale
network measurement",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "121--132",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140293",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present NetQuest, a flexible
framework for large-scale network measurement. We apply
{\em Bayesian experimental design\/} to select active
measurements that maximize the amount of information we
gain about the network path properties subject to given
resource constraints. We then apply {\em network
inference\/} techniques to reconstruct the properties
of interest based on the partial, indirect observations
we get through these measurements. By casting network
measurement in a general Bayesian decision theoretic
framework, we achieve flexibility. Our framework can
support a variety of design requirements, including (i)
differentiated design for providing better resolution
to certain parts of the network, (ii) augmented design
for conducting additional measurements given existing
observations, and (iii) joint design for supporting
multiple users who are interested in different parts of
the network. Our framework is also {\em scalable\/} and
can design measurement experiments that span thousands
of routers and end hosts. We develop a toolkit that
realizes the framework on PlanetLab. We conduct
extensive evaluation using both real traces and
synthetic data. Our results show that the approach can
accurately estimate network-wide and individual path
properties by only monitoring within 2--10\% of paths.
We also demonstrate its effectiveness in providing
differentiated monitoring, supporting continuous
monitoring, and satisfying the requirements of multiple
users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Bayesian experimental design; network inference;
network measurement; network tomography",
}
@Article{Zhao:2006:RTM,
author = "Qi Zhao and Zihui Ge and Jia Wang and Jun Xu",
title = "Robust traffic matrix estimation with imperfect
information: making use of multiple data sources",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "133--144",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140294",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Estimation of traffic matrices, which provide critical
input for network capacity planning and traffic
engineering, has recently been recognized as an
important research problem. Most of the previous
approaches infer traffic matrix from either SNMP link
loads or sampled NetFlow records. In this work, we
design novel inference techniques that, by
statistically correlating SNMP link loads and sampled
NetFlow records, allow for much more accurate
estimation of traffic matrices than obtainable from
either information source alone, even when sampled
NetFlow records are available at only a subset of
ingress. Our techniques are practically important and
useful since both SNMP and NetFlow are now widely
supported by vendors and deployed in most of the
operational IP networks. More importantly, this
research leads us to a new insight that SNMP link loads
and sampled NetFlow records can serve as `error
correction codes' to each other. This insight helps us
to solve a challenging open problem in traffic matrix
estimation, `How to deal with dirty data (SNMP and
NetFlow measurement errors due to
hardware/software/transmission problems)?' We design
techniques that, by comparing notes between the above
two information sources, identify and remove dirty
data, and therefore allow for accurate estimation of
the traffic matrices with the cleaned data. We conducted
experiments on real measurement data obtained from a
large tier-1 ISP backbone network. We show that, when
full deployment of NetFlow is not available, our
algorithm can improve estimation accuracy significantly
even with a small fraction of NetFlow data. More
importantly, we show that dirty data can contaminate a
traffic matrix, and identifying and removing them can
reduce errors in traffic matrix estimation by up to an
order of magnitude. Routing changes are another key
factor that affects estimation accuracy. We show that
using them as the a priori, the traffic matrices can be
estimated much more accurately than those omitting the
routing change. To the best of our knowledge, this work
is the first to offer a comprehensive solution which
fully takes advantage of using multiple readily
available data sources. Our results provide valuable
insights on the effectiveness of combining flow
measurement and link load measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network measurement; statistical inference; traffic
matrix",
}
@Article{Lall:2006:DSA,
author = "Ashwin Lall and Vyas Sekar and Mitsunori Ogihara and
Jun Xu and Hui Zhang",
title = "Data streaming algorithms for estimating entropy of
network traffic",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "145--156",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140295",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Using entropy of traffic distributions has been shown
to aid a wide variety of network monitoring
applications such as anomaly detection, clustering to
reveal interesting patterns, and traffic
classification. However, realizing this potential
benefit in practice requires accurate algorithms that
can operate on high-speed links, with low CPU and
memory requirements. In this paper, we investigate the
problem of estimating the entropy in a streaming
computation model. We give lower bounds for this
problem, showing that neither approximation nor
randomization alone will let us compute the entropy
efficiently. We present two algorithms for randomly
approximating the entropy in a time and space efficient
manner, applicable for use on very high speed (greater
than OC-48) links. The first algorithm for entropy
estimation is inspired by the structural similarity
with the seminal work of Alon et al. for estimating
frequency moments, and we provide strong theoretical
guarantees on the error and resource usage. Our second
algorithm utilizes the observation that the performance
of the streaming algorithm can be enhanced by
separating the high-frequency items (or elephants) from
the low-frequency items (or mice). We evaluate our
algorithms on traffic traces from different deployment
scenarios.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; traffic analysis",
}
@Article{Lee:2006:SEE,
author = "Sanghwan Lee and Zhi-Li Zhang and Sambit Sahu and
Debanjan Saha",
title = "On suitability of {Euclidean} embedding of {Internet}
hosts",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "157--168",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140296",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we investigate the suitability of
embedding Internet hosts into a Euclidean space given
their pairwise distances (as measured by round-trip
time). Using the classical scaling and matrix
perturbation theories, we first establish the (sum of
the) magnitude of {\em negative\/} eigenvalues of the
(doubly-centered, squared) distance matrix as a measure
of suitability of Euclidean embedding. We then show
that the distance matrix among Internet hosts contains
negative eigenvalues of {\em large magnitude}, implying
that embedding the Internet hosts in a Euclidean space
would incur relatively large errors. Motivated by
earlier studies, we demonstrate that the inaccuracy of
Euclidean embedding is caused by a large degree of {\em
triangle inequality violation\/} (TIV) in the Internet
distances, which leads to negative eigenvalues of large
magnitude. Moreover, we show that the TIVs are likely
to occur {\em locally}, hence, the distances among
these close-by hosts cannot be estimated accurately
using a {\em global\/} Euclidean embedding, in
addition, increasing the dimension of embedding does
not reduce the embedding errors. Based on these
insights, we propose a new hybrid model for embedding
the network nodes using only a 2-dimensional Euclidean
coordinate system and small {\em error adjustment
terms}. We show that the accuracy of the proposed
embedding technique is as good as, if not better, than
that of a 7-dimensional Euclidean embedding.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Euclidean embedding; suitability; triangle
inequality",
}
@Article{Casale:2006:EAE,
author = "Giuliano Casale",
title = "An efficient algorithm for the exact analysis of
multiclass queueing networks with large population
sizes",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "169--180",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140298",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce an efficient algorithm for the exact
analysis of closed multiclass product-form queueing
network models with large population sizes. We adopt a
novel approach, based on linear systems of equations,
which significantly reduces the cost of computing
normalizing constants. With the proposed algorithm, the
analysis of a model with $N$ circulating jobs of
multiple classes requires essentially the solution of
$N$ linear systems with order independent of population
sizes. A distinguishing feature of our approach is that
we can immediately apply theorems, solution techniques,
and decompositions for linear systems to queueing
network analysis. Following this idea, we propose a
block triangular form of the linear system that further
reduces the requirements, in terms of both time and
storage, of an exact analysis. An example illustrates
the efficiency of the resulting algorithm in presence
of large populations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computational algorithms; exact analysis; multiclass
models; normalizing constant; product-form queueing
networks",
}
@Article{VanVelthoven:2006:TAT,
author = "J. {Van Velthoven} and B. {Van Houdt} and C. Blondia",
title = "Transient analysis of tree-like processes and its
application to random access systems",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "181--190",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140299",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new methodology to assess transient performance
measures of tree-like processes is proposed by
introducing the concept of tree-like processes with
marked time epochs. As opposed to the standard
tree-like process, such a process marks part of the
time epochs by following a set of Markovian rules. Our
interest lies in obtaining the system state at the
$n$-th marked time epoch as well as the mean time at
which this $n$-th marking occurs. The methodology
transforms the transient problem into a stationary one
by applying a discrete Erlangization and constructing a
reset Markov chain. A fast algorithm, with limited
memory usage, that exploits the block structure of the
reset Markov chain is developed and is based, among
others, on Sylvester matrix equations and fast Fourier
transforms. The theory of tree-like processes
generalizes the well-known paradigm of
Quasi-Birth-Death Markov chains and has various
applications. We demonstrate our approach on the
celebrated Capetanakis--Tsybakov--Mikhailov (CTM)
random access protocol yielding new insights on its
initial behavior both in normal and overload
conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "contention resolution; Matrix analytic methods; random
access algorithms; transient analysis; tree-like
processes",
}
@Article{Buchholz:2006:BSR,
author = "Peter Buchholz",
title = "Bounding stationary results of tandem networks with
{MAP} input and {PH} service time distributions",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "191--202",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140300",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a new approach to compute
bounds on stationary measures of queueing systems with
an input process described by a Markovian Arrival
Process (MAP) and a sequence of stations with Phase
Type (PH) service time distributions. Such queueing
systems cannot be solved exactly since they have an
infinite state space in several natural dimensions.
Based on earlier work on the computation of bounds for
specific classes of infinite Markov chains, the paper
presents a new approach specifically tailored to the
analysis of the mentioned class of queueing networks.
By increasing the size of the state space of the
aggregated Markov chain to be solved for bound
computation, bounds can be made arbitrarily tight, but
practical limits come up due to the computational
complexity. However, we show by means of several
examples that tight bounds can be derived with low
effort for a large set of queueing systems in the
mentioned class.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bounds; Markov chains; stationary analysis; Tandem
queues",
}
@Article{Gupta:2006:FCQ,
author = "Varun Gupta and Mor Harchol-Balter and Alan
Scheller-Wolf and Uri Yechiali",
title = "Fundamental characteristics of queues with fluctuating
load",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "203--215",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140301",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Systems whose arrival or service rates fluctuate over
time are very common, but are still not well understood
analytically. Stationary formulas are poor predictors
of systems with fluctuating load. When the arrival and
service processes fluctuate in a Markovian manner,
computational methods, such as Matrix-analytic and
spectral analysis, have been instrumental in the
numerical evaluation of quantities like mean response
time. However, such computational tools provide only
limited insight into the {\em functional behavior\/} of
the system with respect to its primitive input
parameters: the arrival rates, service rates, and rate
of fluctuation. For example, the shape of the function
that maps rate of fluctuation to mean response time is
not well understood, even for an M/M/1 system. Is this
function increasing, decreasing, monotonic? How is its
shape affected by the primitive input parameters? Is
there a simple closed-form approximation for the shape
of this curve? Turning to user experience: How is the
performance experienced by a user arriving into a `high
load' period different from that of a user arriving
into a `low load' period, or simply a random user? Are
there stochastic relations between these? In this
paper, we provide the first answers to these
fundamental questions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluctuating load; MAP; MMPP; non-stationary
arrivals/service; Ross's conjecture; stochastic
ordering",
}
@Article{Narayanasamy:2006:ALO,
author = "Satish Narayanasamy and Cristiano Pereira and Harish
Patil and Robert Cohn and Brad Calder",
title = "Automatic logging of operating system effects to guide
application-level architecture simulation",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "216--227",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140303",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern architecture research relies heavily on
application-level detailed pipeline simulation. A time
consuming part of building a simulator is correctly
emulating the operating system effects, which is
required even if the goal is to simulate just the
application code, in order to achieve functional
correctness of the application's execution. Existing
application-level simulators require manually hand
coding the emulation of each and every possible system
effect (e.g., system call, interrupt, DMA transfer)
that can impact the application's execution. Developing
such an emulator for a given operating system is a
tedious exercise, and it can also be costly to maintain
it to support newer versions of that operating system.
Furthermore, porting the emulator to a completely
different operating system might involve building it
all together from scratch. In this paper, we describe a
tool that can automatically log operating system
effects to guide architecture simulation of application
code. The benefits of our approach are: (a) we do not
have to build or maintain any infrastructure for
emulating the operating system effects, (b) we can
support simulation of more complex applications on our
application-level simulator, including those
applications that use asynchronous interrupts, DMA
transfers, etc., and (c) using the system effects logs
collected by our tool, we can deterministically
re-execute the application to guide architecture
simulation that has reproducible results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "architecture simulation; checkpoints; emulating system
calls",
}
@Article{Guo:2006:AMC,
author = "Fei Guo and Yan Solihin",
title = "An analytical model for cache replacement policy
performance",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "228--239",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140304",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the increasing gap between CPU and memory
speed, cache performance plays an increasingly critical
role in determining the overall performance of
microprocessor systems. One of the important factors
that affect cache performance is the cache
replacement policy. Despite the importance, current
analytical cache performance models ignore the impact
of cache replacement policies on cache performance. To
the best of our knowledge, this paper is the first to
propose an analytical model which predicts the
performance of cache replacement policies. The input to
our model is a simple circular sequence profiling of
each application, which requires very little storage
overhead. The output of the model is the predicted miss
rates of an application under different replacement
policies. The model is based on probability theory and
utilizes Markov processes to compute each cache access'
miss probability. The model makes realistic assumptions and
relies solely on the statistical properties of the
application, without relying on heuristics or rules of
thumb. The model's run time is less than 0.1 seconds,
much lower than that of trace simulations. We validate
the model by comparing the predicted miss rates of
seventeen Spec2000 and NAS benchmark applications
against miss rates obtained by detailed
execution-driven simulations, across a range of
different cache sizes, associativities, and four
replacement policies, and show that the model is very
accurate. The model's average prediction error is
1.41\%, and there are only 14 out of 952 validation
points in which the prediction errors are larger than
10\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "analytical model; cache performance; replacement
policy",
}
@Article{Olshefski:2006:UMC,
author = "David Olshefski and Jason Nieh",
title = "Understanding the management of client perceived
response time",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "240--251",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140305",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Understanding and managing the response time of web
services is of key importance as dependence on the
World Wide Web continues to grow. We present {\em
Remote Latency-based Management\/} (RLM), a novel
server-side approach for managing pageview response
times as perceived by remote clients, in real-time. RLM
passively monitors server-side network traffic,
accurately tracks the progress of page downloads and
their response times in real-time, and dynamically
adapts connection setup behavior and web page content
as needed to meet response time goals. To manage client
perceived pageview response times, RLM builds a novel
event node model to guide the use of several techniques
for manipulating the packet traffic in and out of a web
server complex, including fast SYN and SYN/ACK
retransmission, and embedded object removal and
rewrite. RLM operates as a stand-alone appliance that
simply sits in front of a web server complex, without
any changes to existing web clients, servers, or
applications. We have implemented RLM on an
inexpensive, commodity, Linux-based PC and present
experimental results that demonstrate its effectiveness
in managing client perceived pageview response times on
transactional e-commerce web workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; client perceived response time;
QoS; web server performance",
}
@Article{Thorup:2006:CIP,
author = "Mikkel Thorup",
title = "Confidence intervals for priority sampling",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "252--263",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140307",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With a priority sample from a set of weighted items,
we can provide an unbiased estimate of the total weight
of any subset. The strength of priority sampling is
that it gives the best possible estimate variance on
any set of input weights. For a concrete subset,
however, the variance on the estimate of its weight
depends strongly on the total set of weights and the
distribution of the subset in this set. The variance
is, for example, much smaller if weights are heavy
tailed. In this paper we show how to generate a
confidence interval directly from a priority sample,
thus complementing the weight estimates with concrete
lower and upper bounds. In particularly we will tell
how heavy subsets can likely be hidden when the
priority estimate for a subset is zero. Our confidence
intervals for priority sampling are evaluated on real
and synthetic data and compared with confidence
intervals obtained with uniform sampling, weighted
sampling with replacement, and threshold sampling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "confidence intervals; sampling",
}
@Article{Osogami:2006:FPBa,
author = "Takayuki Osogami and Toshinari Itoko",
title = "Finding probably better system configurations
quickly",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "264--275",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140308",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of computer and communication systems
can in theory be optimized by iteratively finding
better system configurations. However, a bottleneck is
the time required in simulations/experiments for
finding a better system configuration in each
iteration. We propose algorithms that quickly find a
system configuration that is probably better than the
`standard' system configuration, where the performance
of a given system configuration is estimated via
simulations or experiments. We prove that our
algorithms make correct decisions with high
probability, and various heuristics to reduce the total
simulation time are proposed. Numerical experiments
show the effectiveness of the proposed algorithms, and
this leads to several guidelines for designing
efficient and reliable optimization procedures for the
performance of computer and communication systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "local search; performance optimization; ranking and
selection; screening; simulation",
}
@Article{Bonald:2006:EMN,
author = "Thomas Bonald",
title = "The {Erlang} model with non-{Poisson} call arrivals",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "276--286",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140309",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Erlang formula is known to be insensitive to the
holding time distribution beyond the mean. While calls
are generally assumed to arrive as a Poisson process,
we prove that it is in fact sufficient that users
generate {\em sessions\/} according to a Poisson
process, each session being composed of a random,
finite number of calls and idle periods. A key role is
played by the retrial behavior in case of call
blocking. We illustrate the results by a number of
examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Erlang formula; insensitivity; loss networks",
}
@Article{Fidler:2006:WDS,
author = "Markus Fidler and Jens B. Schmitt",
title = "On the way to a distributed systems calculus: an
end-to-end network calculus with data scaling",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "287--298",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140310",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network calculus is a min-plus system theory which
facilitates the efficient derivation of performance
bounds for networks of queues. It has successfully been
applied to provide end-to-end quality of service
guarantees for integrated and differentiated services
networks. Yet, a true end-to-end analysis including the
various components of end systems as well as taking
into account mid-boxes like firewalls, proxies, or
media gateways has not been accomplished so far. The
particular challenge posed by such systems are
transformation processes, like data processing,
compression, encoding, and decoding, which may alter
data arrivals drastically. The heterogeneity, which is
reflected in the granularity of operation, for example
multimedia applications process video frames which,
however, are represented by packets in the network,
complicates the analysis further. To this end this
paper evolves a concise network calculus with scaling
functions, which allow modelling a wide variety of
transformation processes. Combined with the concept of
packetizer this theory enables a true end-to-end
analysis of distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network calculus; packetizers; scaling functions",
}
@Article{Peserico:2006:RNC,
author = "Enoch Peserico and Larry Rudolph",
title = "Robust network connectivity: when it's the big picture
that matters",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "299--310",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140312",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work analyzes the connectivity of large diameter
networks where every link has an independent
probability p of failure. We give a (relatively simple)
topological condition that guarantees good connectivity
between regions of such a network. Good connectivity
means that the regions are connected by nearly as many
disjoint, fault-free paths as there are when the entire
network is fault-free. The topological condition is
satisfied in many cases of practical interest, even
when two regions are at a distance much larger than the
expected `distance between faults', 1/p. We extend this
result to networks with failures on nodes, as well as
geometric radio networks with random distribution of
nodes in a deployment area of a given topography. A
rigorous formalization of the intuitive notion of
`hole' in a (not necessarily planar) graph is at the
heart of our result and our proof. Holes, in the
presence of faults, degrade connectivity in the region
`around' them to a distance that grows with the size of
the hole and the density of faults. Thus, to guarantee
good connectivity between two regions even in the
presence of faults, the intervening network should not
only sport multiple paths, but also not too many large
holes. Our result essentially characterizes networks
where connectivity depends on the `big picture'
structure of the network, and not on the local `noise'
caused by faulty or imprecisely positioned nodes and
links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "ad hoc; connectivity; fault; network; percolation;
random; resilient; topology",
}
@Article{Dong:2006:PCT,
author = "Qunfeng Dong and Suman Banerjee and Jia Wang and
Dheeraj Agrawal and Ashutosh Shukla",
title = "Packet classifiers in ternary {CAMs} can be smaller",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "311--322",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140313",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Serving as the core component in many packet
forwarding, differentiating and filtering schemes,
packet classification continues to grow its importance
in today's IP networks. Currently, most vendors use
Ternary CAMs (TCAMs) for packet classification. TCAMs
usually use brute-force parallel hardware to
simultaneously check for all rules. One of the
fundamental problems of TCAMs is that TCAMs suffer from
range specifications because rules with range
specifications need to be translated into multiple TCAM
entries. Hence, the cost of packet classification will
increase substantially as the number of TCAM entries
grows. As a result, network operators hesitate to
configure packet classifiers using range
specifications. In this paper, we optimize packet
classifier configurations by identifying semantically
equivalent rule sets that lead to reduced number of
TCAM entries when represented in hardware. In
particular, we develop a number of effective
techniques, which include: trimming rules, expanding
rules, merging rules, and adding rules. Compared with
previously proposed techniques which typically require
modifications to the packet processor hardware, our
scheme does not require any hardware modification,
which is highly preferred by ISPs. Moreover, our scheme
is complementary to previous techniques in that those
techniques can be applied on the rule sets optimized by
our scheme. We evaluate the effectiveness and potential
of the proposed techniques using extensive experiments
based on both real packet classifiers managed by a
large tier-1 ISP and synthetic data generated randomly.
We observe significant reduction on the number of TCAM
entries that are needed to represent the optimized
packet classifier configurations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; semantic equivalence; ternary
CAM",
}
@Article{Zhao:2006:DNS,
author = "Qi Zhao and Jun Xu and Zhen Liu",
title = "Design of a novel statistics counter architecture with
optimal space and time efficiency",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "323--334",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140314",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of how to efficiently maintain a large
number (say millions) of statistics counters that need
to be incremented at very high speed has received
considerable research attention recently. This problem
arises in a variety of router management algorithms and
data streaming algorithms, where a large array of
counters is used to track various network statistics
and to implement various counting sketches
respectively. While fitting these counters entirely in
SRAM meets the access speed requirement, a large amount
of SRAM may be needed with a typical counter size of 32
or 64 bits, and hence the high cost. Solutions proposed
in recent works have used hybrid architectures where
small counters in SRAM are incremented at high speed,
and occasionally written back ('flushed') to larger
counters in DRAM. Previous solutions have used complex
schedulers with tree-like or heap data structures to
pick which counters in SRAM are about to overflow, and
flush them to the corresponding DRAM counters. In this
work, we present a novel hybrid SRAM/DRAM counter
architecture that consumes much less SRAM and has a
much simpler design of the scheduler than previous
approaches. We show, in fact, that our design is
optimal in the sense that for a given speed difference
between SRAM and DRAM, our design uses the
theoretically minimum number of bits per counter in
SRAM. Our design uses a small write-back buffer (in
SRAM) that stores indices of the overflowed counters
(to be flushed to DRAM) and an extremely simple
randomized algorithm to statistically guarantee that
SRAM counters do not overflow in bursts large enough to
fill up the write-back buffer even in the worst case.
The statistical guarantee of the algorithm is proven
using a combination of worst case analysis for
characterizing the worst case counter increment
sequence and a new tail bound theorem for bounding the
probability of filling up the write-back buffer.
Experiments with real Internet traffic traces show that
the buffer size required in practice is significantly
smaller than needed in the worst case.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; router; statistics counter",
}
@Article{Kumar:2006:FMP,
author = "Rakesh Kumar and David D. Yao and Amitabha Bagchi and
Keith W. Ross and Dan Rubenstein",
title = "Fluid modeling of pollution proliferation in {P2P}
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "335--346",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140316",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "P2P systems are highly vulnerable to pollution attacks
in which attackers inject multiple versions of
corrupted content into the system, which is then
further proliferated by unsuspecting users. However, to
our knowledge, there are no closed-form solutions that
describe this phenomenon, nor are there models that
describe how the injection of multiple versions of
corrupted content impacts a clients' ability to receive
a valid copy. In this paper we develop a suite of fluid
models that model pollution proliferation in P2P
systems. These fluid models lead to systems of
non-linear differential equations. We obtain
closed-form solutions for the differential equations;
for the remaining models, we efficiently solve the
differential equations numerically. The models capture
a variety of user behaviors, including propensity for
popular versions, abandonment after repeated failure to
obtain a good version, freeloading, and local version
blacklisting. Our analysis reveals intelligent
strategies for attackers as well as strategies for
clients seeking to recover non-polluted content within
large-scale P2P networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fluid model; Markov chain; P2P; pollution attack",
}
@Article{Li:2006:FSS,
author = "Kang Li and Zhenyu Zhong",
title = "Fast statistical spam filter by approximate
classifications",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "347--358",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140317",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Statistical-based Bayesian filters have become a
popular and important defense against spam. However,
despite their effectiveness, their greater processing
overhead can prevent them from scaling well for
enterprise-level mail servers. For example, the
dictionary lookups that are characteristic of this
approach are limited by the memory access rate,
therefore relatively insensitive to increases in CPU
speed. We address this scaling issue by proposing an
acceleration technique that speeds up Bayesian filters
based on approximate classification. The approximation
uses two methods: hash-based lookup and lossy encoding.
Lookup approximation is based on the popular Bloom
filter data structure with an extension to support
value retrieval. Lossy encoding is used to further
compress the data structure. While both methods
introduce additional errors to a strict Bayesian
approach, we show how the errors can be both minimized
and biased toward a false negative classification. We
demonstrate a 6x speedup over two well-known spam
filters (bogofilter and qsf) while achieving an
identical false positive rate and similar false
negative rate to the original filters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximation; Bayesian filter; bloom filter; SPAM",
}
@Article{Kola:2006:QAB,
author = "George Kola and Mary K. Vernon",
title = "{QuickProbe}: available bandwidth estimation in two
roundtrips",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "359--360",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140319",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "available bandwidth estimation",
}
@Article{Kaushik:2006:FTW,
author = "Neena R. Kaushik and Silvia M. Figueira and Stephen A.
Chiappari",
title = "Flexible time-windows for advance reservation in
{LambdaGrids}",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "361--362",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140320",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advance-reservation requests are an essential feature
of LambdaGrids, where resources may need to be
co-allocated at pre-determined times. In this paper, we
discuss unconstrained advance reservations, which use
flexible time-windows to lower blocking probability
and, consequently, increase resource utilization. We
claim and show using simulations that the minimum
window size, which theoretically brings the blocking
probability to 0, in a first-come-first-served advance
reservation model without time-slots, equals the
waiting time in a queue-based on-demand model. We also
show, with simulations, the window sizes, which bring
the blocking probability to its minimum, for an advance
reservation model with time-slots.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "advance reservation; flexible time-windows;
LambdaGrids; scheduling",
}
@Article{Verbowski:2006:APS,
author = "Chad Verbowski and Emre Kiciman and Brad Daniels and
Yi-Min Wang and Roussi Roussev and Shan Lu and Juhan
Lee",
title = "Analyzing persistent state interactions to improve
state management",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "363--364",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140321",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file; persistent state; registry; state management;
system management; trace",
}
@Article{Verloop:2006:DOS,
author = "Maaike Verloop and Rudesindo N{\'u}{\~n}ez-Queija and
Sem Borst",
title = "Delay-optimal scheduling in bandwidth-sharing
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "365--366",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140322",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "alpha-fair strategies; bandwidth-sharing networks;
delay optimization",
}
@Article{Menth:2006:TPP,
author = "Michael Menth and Robert Henjes and Christian Zepfel
and Sebastian Gehrsitz",
title = "Throughput performance of popular {JMS} servers",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "367--368",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140323",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Java Messaging Service (JMS) facilitates
communication among distributed software components
according to the publish/subscribe principle. If the
subscribers install filter rules on the JMS server, JMS
can be used as a message routing platform, but it is
not clear whether its message throughput is
sufficiently high to support large-scale systems. In
this paper, we investigate the capacity of three high
performance JMS server implementations: FioranoMQ,
SunMQ, and WebsphereMQ. In contrast to other studies,
we focus on the message throughput in the presence of
filters and show that filtering reduces the performance
significantly. We present models for the message
processing time of each server and validate them by
measurement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "java messaging service; publish/subscribe; server
performance",
}
@Article{Garg:2006:OHR,
author = "Rahul Garg and Yogish Sabharwal",
title = "Optimizing the {HPCC} randomaccess benchmark on {Blue
Gene\slash L} supercomputer",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "369--370",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140324",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of supercomputers has traditionally
been evaluated using the LINPACK benchmark [3], which
stresses only the floating point units without
significantly loading the memory or the network
subsystems.\par
The HPC Challenge (HPCC) benchmark suite is being
proposed as an alternative to evaluate the performance
of supercomputers. It consists of seven benchmarks,
each designed to measure a specific aspect of the
system performance. These benchmarks include (i) the
high performance LINPACK (HPL) (ii) DGEMM, which
measures the floating point rate of execution of double
precision real matrix-matrix multiplication, (iii)
STREAM that measures sustainable memory bandwidth and
the corresponding computation rate for four simple
vector kernels, namely, copy, scale, add and triad (iv)
PTRANS that exercises the network by taking parallel
transpose of a large distributed matrix (v)
Randomaccess that measures the rate of integer updates
to random memory locations (vi) FFT which measures the
floating point rate of execution of a double precision
complex one-dimensional Discrete Fourier Transform
(DFT) and (vii) communication bandwidth and latency
which measures latency and bandwidth of a number of
simultaneous communication patterns.\par
In this paper we outline the optimization techniques
used to obtain the presently best reported performance
of the HPCC Randomaccess benchmark on the Blue Gene/L
supercomputer.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmarks; high performance computing; randomaccess",
}
@Article{Piotrowski:2006:PPS,
author = "Tadeusz Piotrowski and Suman Banerjee and Sudeept
Bhatnagar and Samrat Ganguly and Rauf Izmailov",
title = "Peer-to-peer streaming of stored media: the indirect
approach",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "371--372",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140325",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "media-streaming; overlays; peer-to-peer",
}
@Article{Dholakia:2006:ANI,
author = "Ajay Dholakia and Evangelos Eleftheriou and Xiao-Yu Hu
and Ilias Iliadis and Jai Menon and K. K. Rao",
title = "Analysis of a new intra-disk redundancy scheme for
high-reliability {RAID} storage systems in the presence
of unrecoverable errors",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "373--374",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140277.1140326",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Today's data storage systems are increasingly adopting
low-cost disk drives that have higher capacity but
lower reliability, leading to more frequent rebuilds
and to a higher risk of unrecoverable media errors. We
propose a new XOR-based intra-disk redundancy scheme,
called interleaved parity check (IPC), to enhance the
reliability of RAID systems that incurs only negligible
I/O performance degradation. The proposed scheme
introduces an additional level of redundancy inside
each disk, on top of the RAID redundancy across
multiple disks. The RAID parity provides protection
against disk failures, while the proposed scheme aims
to protect against media-related unrecoverable errors.
We develop a new model capturing the effect of
correlated unrecoverable sector errors and subsequently
use it to analyze the proposed scheme as well as the
traditional redundancy schemes based on Reed--Solomon
(RS) codes and single-parity-check (SPC) codes. We
derive closed-form expressions for the mean time to
data loss (MTTDL) of RAID 5 and RAID 6 systems in the
presence of unrecoverable errors and disk failures. We
then combine these results for a comprehensive
characterization of the reliability of RAID systems
that incorporate the proposed IPC redundancy scheme.
Our results show that in the practical case of
correlated errors, the proposed scheme provides the
same reliability as the optimum albeit more complex RS
coding scheme. Finally, the throughput performance of
incorporating the intra-disk redundancy on various RAID
systems is evaluated by means of event-driven
simulations. A detailed description of these
contributions is given in [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file and I/O systems; RAID; reliability analysis;
stochastic modeling",
}
@Article{Bower:2006:AAV,
author = "Fred A. Bower and Derek Hower and Mahmut Yilmaz and
Daniel J. Sorin and Sule Ozev",
title = "Applying architectural vulnerability analysis to
hard faults in the microprocessor",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "375--376",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140327",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a new metric, Hard-Fault
Architectural Vulnerability Factor (H-AVF), to allow
designers to more effectively compare alternate
hard-fault tolerance schemes. In order to provide
intuition on the use of H-AVF as a metric, we evaluate
fault-tolerant level-1 data cache and register file
implementations using error correcting codes and a
fault-tolerant adder using triple-modular redundancy
(TMR). For each of the designs, we compute its H-AVF.
We then use these H-AVF values in conjunction with
other properties of the design, such as die area and
power consumption, to provide composite metrics. The
derived metrics provide simple, quantitative measures
of the cost-effectiveness of the evaluated designs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer architecture; hard-fault tolerance;
reliability",
}
@Article{Broberg:2006:MFM,
author = "James A. Broberg and Zhen Liu and Cathy H. Xia and Li
Zhang",
title = "A multicommodity flow model for distributed stream
processing",
journal = j-SIGMETRICS,
volume = "34",
number = "1",
pages = "377--378",
month = jun,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1140103.1140328",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:21:37 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithms; multicommodity flow; potential
function; stream processing",
}
@Article{Bonald:2006:GEF,
author = "T. Bonald",
title = "{Guest Editor}'s foreword",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "2--2",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168136",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance 2005, the 24th International Symposium on
Computer Performance, Modeling, Measurements and
Evaluation, was held in Juan-les-Pins, France, on
October 3--7, 2005. In addition to the main technical
program, a poster session was organized so that ongoing
or recent research work could be presented and
discussed in an informal setting. Submissions were
solicited as extended abstracts and reviewed by members
of the poster committee. A total of 12 posters were
selected for presentation during the conference. This
special issue of {\em Performance Evaluation Review\/}
consists of the corresponding extended abstracts, which
cover a wide range of topics in the area of performance
evaluation, analytical modeling and simulation of
computer systems and communication networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hautphenne:2006:EPP,
author = "Sophie Hautphenne and Kenji Leibnitz and Marie-Ange
Remiche",
title = "Extinction probability in peer-to-peer file
diffusion",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "3--4",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168137",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent measurement studies [8] have shown that
peer-to-peer (P2P) file sharing applications are the
major traffic source in the Internet. P2P applications,
such as eDonkey, Kazaa, or BitTorrent, form overlay
networks on the application layer and allow their peers
to download and share their files with other peers in a
highly distributed way. As a consequence, peers act
simultaneously as both clients and servers. For a
comprehensive survey of P2P technology, we refer to
[7].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mundinger:2006:APPa,
author = "J. Mundinger and R. R. Weber and G. Weiss",
title = "Analysis of peer-to-peer file dissemination amongst
users of different upload capacities",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "5--6",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168138",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, overlay networks have proven an
effective way of disseminating a file from a single
source to a group of end users via the Internet. A
number of algorithms and protocols have been suggested,
implemented and studied. In particular, much attention
has been given to peer-to-peer (P2P) systems such as
BitTorrent [2], Slurpie [10], SplitStream [1] and
Bullet [5]. The key idea is that the file is divided
into $M$ parts of equal size and that a given user may
download any one of these either from the server or
from a peer who has previously downloaded it. More
recently, a scheme based on network coding [3] has been
suggested. Here, users download linear combinations of
file parts rather than individual file parts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Besson:2006:GSE,
author = "Emmanuel Besson and Aline Gouget and Herv{\'e}
Sibert",
title = "The {GAIA} sensor: an early {DDoS} detection tool",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "7--8",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168139",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed Denial of Service (DDoS) attacks are a
major network security threat. Most recent host-based
DDoS detection mechanisms are dedicated to a particular
set of attacks, focusing either on the recent dynamic
of the traffic, or on its long range dependence. We
propose a DDoS early detection component based on
anomaly detection which combines static and dynamic
behavior analysis, including experimental results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hardy:2006:PCR,
author = "G. Hardy and C. Lucet and N. Limnios",
title = "Probability of connection in regular stochastic
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "9--10",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168140",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we report experiments we did on network
reliability with the BDD-based exact method we present
in [1].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Busic:2006:BTS,
author = "Ana Bu{\v{s}}i{\'c} and Jean-Michel Fourneau",
title = "Bounding transient and steady-state dependability
measures through algorithmic stochastic comparison",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "11--12",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168141",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We are interested in bounding dependability measures
like point and steady-state availability and
reliability of systems modelled by very large Markov
chains which are not numerically tractable. We suppose
that the state space is divided into two classes, UP
(system is operational) and DOWN states. The
reliability at time $t$ is defined as the probability
that the system has always been operational between 0
and $t$. The point availability is the probability that
the system is operational at time $t$, and the
steady-state availability is the limit, if it exists,
of this probability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bossie:2006:CHT,
author = "Craig Bossie and Pierre M. Fiorini",
title = "On checkpointing and heavy-tails in unreliable
computing environments",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "13--15",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168142",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we discuss checkpointing issues that
should be considered whenever jobs execute in
unreliable computing environments. Specifically, we
show that if proper checkpointing procedures are not
properly implemented, then under certain conditions,
job completion time distributions exhibit properties of
{\em heavy-tail\/} or {\em power-tail\/} distributions
(hereafter referred to as power-tail distributions
(PT)), which can lead to highly-variable and long
completion times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mickens:2006:IDS,
author = "James W. Mickens and Brian D. Noble",
title = "Improving distributed system performance using machine
availability prediction",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "16--18",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168143",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a distributed system, a set of networked machines
provides a highly available service to remote clients.
Traditional distributed systems like AFS [2] make a
clear distinction between clients and servers. Client
machines may be poorly administered, cheaply
constructed, often offline, and possibly malicious. In
contrast, servers are expected to be well-administered
and almost always online. Highly available servers
ensure the availability and reliability of the
distributed service.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chydzinski:2006:BOC,
author = "Andrzej Chydzinski",
title = "Buffer overflow calculations in a batch arrival
queue",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "19--21",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168144",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper numerical calculations of the buffer
overflow time in a batch arrival queueing system are
presented. The results indicate that an auto-correlated
input stream, heavy-tailed batch size or service time
distribution have a critical influence on the frequency
of buffer overflows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasce:2006:ECP,
author = "Daniel A. Menasc{\'e} and Vasudeva Akula",
title = "Evaluating caching policies for online auctions",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "22--23",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168145",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Auction sites have grown rapidly in the last couple of
years and recent statistics indicate that eBay carries
about 50 million items for sale at any time on its site
[2]. Our previous work showed that the workload of
online auction sites is substantially different from
that of online retailers and uncovered a plethora of
interesting findings that can be used, among other
things, to improve the performance of online auction
sites [1, 3]: (i) A very large percentage of auctions
have a relatively low number of bids and bidders and a
very small percentage of auctions have a high number of
bids and bidders. (ii) There is some bidding activity
at the beginning stages of an auction. This activity
slows down in the middle and increases considerably
after 90\% of an auction's life time has elapsed. (iii)
Prices rise faster in the first 20\% of an auction's
life time than in the next 70\% of its life time.
However, after the age of an auction reaches 90\%,
prices increase much faster than in the two previous
phases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vincent:2006:PSI,
author = "Jean-Marc Vincent and J{\'e}r{\^o}me Vienne",
title = "Perfect simulation of index based routing queueing
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "24--25",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markovian queueing networks models are widely used for
performance evaluation of computer systems, production
lines, communication networks and so on. Routing
strategies allocate clients to queues after the end of
service. In many situations such as deterministic,
probabilistic, or state dependent like {\em Join the
shortest queue\/} routing, the routing function could
be written in terms of index scheduling functions
introduced in [3, 6].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chang:2006:STQ,
author = "Cheng-Shang Chang and Yi-Ting Chen and Jay Cheng and
Po-Kai Huang and Duan-Shin Lee",
title = "From switching theory to `queueing' theory",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "26--28",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168147",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing theory is generally known as the theory to
study the performance of queues. In this extended
abstract, we are interested in another aspect of
queueing theory, the theory to construct queues. Our
interest in constructing queues originates from optical
packet switching. Traditionally, queues are relatively
cheap to build via electronic memory. However, it is
very costly to convert optical packets into electronic
packets. As such, building optical queues with minimum
complexity has become an important research topic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Giannoulis:2006:CLP,
author = "Anastasios Giannoulis and Konstantinos P. Tsoukatos
and Leandros Tassiulas",
title = "Cross-layer power control in wireless networks",
journal = j-SIGMETRICS,
volume = "34",
number = "2",
pages = "29--31",
month = sep,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1168134.1168148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:24 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a power control algorithm that exploits
queue length information to achieve maximum data
throughput in single-hop CDMA wireless networks. The
algorithm operates in real-time, i.e., executes a
single iteration per data transmission. A variant of
the algorithm employing the exponential scheduling rule
steers queue length ratios to desired targets.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2006:F,
author = "Mark S. Squillante",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "2--2",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215959",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nakassis:2006:TPQ,
author = "Anastase Nakassis and Vladimir Marbukh",
title = "Towards power and {QoS} aware wireless networks",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "3--5",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215960",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The paper studies the optimal use of energy in
wireless networking, the feasibility region of tasks
that share a multi-access channel, and efficient
algorithms for determining if a given set of tasks and
resources falls within the feasibility region.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network information theory; Pareto optimality",
}
@Article{Yazici:2006:EPD,
author = "Emine {\c{S}}ule Yazici and Selda
K{\"u}{\c{c}}{\"u}k{\c{c}}if{\c{c}}i and {\"O}znur
{\"O}zkasap and Mine {\c{C}}a{\u{g}}lar",
title = "Exact probability distributions for peer-to-peer
epidemic information diffusion",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "6--8",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215961",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An efficient approach for information diffusion in
distributed systems is to utilize epidemic algorithms
that involve pair-wise propagation of updates. Epidemic
algorithms are fully distributed and randomized
approaches such that every peer in an information
diffusion session picks a (subset of the other) peer(s)
randomly for efficient propagation of updates, through
periodic rounds. The underlying epidemics theory for
the biological systems studies the spreading of
infectious diseases through a population [1,2]. When
applied to an information diffusion application, such
protocols have beneficial features such as scalability,
robustness against failures and provision of eventual
consistency. Exact as well as asymptotic distributions
have been studied for different epidemic models in
[3,4]. In contrast to such previous studies, we
investigate variations of the epidemic algorithms used
in the context of distributed information diffusion and
derive exact diffusion probabilities for them.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Luan:2006:MOC,
author = "Hao Luan and Danny H. K. Tsang and Kin Wah Kwong",
title = "Media overlay construction via a {Markov} chain {Monte
Carlo} method",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "9--11",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215962",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider the fairness issue of BT
and tackle the problem with a general framework using
proactive topology adaptations. The topology formed
possesses a special link-level homogeneity property
with each peer having the same capacity per out-degree
value. Such property guarantees that each directional
link has the same uploading bandwidth. Together with
the Tit-for-Tat policy, peers upload and download at
the same rate over each connection and therefore
achieve fairness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mundinger:2006:APPb,
author = "Jochen Mundinger and Richard Weber and Gideon Weiss",
title = "Analysis of peer-to-peer file dissemination",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "12--14",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215963",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, overlay networks have proven a
popular way of disseminating potentially large files
from a single server $S$ to a potentially large group
of $N$ end users via the Internet. A number of
algorithms and protocols have been suggested,
implemented and studied. In particular, much attention
has been given to peer-to-peer (P2P) systems such as
BitTorrent [5], Slurpie [20], SplitStream [4], Bullet
[11] and Avalanche [6]. The key idea is that the file
is divided into $M$ parts of equal size and that a
given user may download any one of these --- or, for
Avalanche, linear combinations of these --- either from
the server or from a peer who has previously downloaded
it.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Raz:2006:TMS,
author = "David Raz and Hanoch Levy and Benjamin Avi-Itzhak",
title = "On the twin measure and system predictability and
fairness",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "15--17",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215964",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two identical customers with deterministically
identical service times arrive at a queueing system
simultaneously (Twins), but leave the system 2 hours
apart. Is their sojourn time predictable? Is the system
fair? We propose a novel measure based on the principle
that in a predictable and fair system, `twin' customers
should not depart the system very far apart. We analyze
this measure for a number of common service policies
and compare the results. We compare the results to
those of other fairness and predictability approaches
proposed recently and discuss its usefulness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brown:2006:CFP,
author = "Patrick Brown",
title = "Comparing {FB} and {PS} scheduling policies",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "18--20",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215965",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we obtain new results concerning the
expected response time of the foreground-background
(FB) scheduling discipline and its comparison with
processor sharing (PS). Some results previously derived
for job sizes with finite second moment or bounded
sizes, are extended to infinite second moments. New
bounds and asymptotic results are also derived. We show
that for job sizes with infinite second moment large
jobs may benefit from the FB scheduling discipline
although this discipline favors short jobs. For certain
distributions all jobs sizes may even benefit from FB
with respect to PS showing that the performance
benefits obtained by some job sizes need not be
obtained at the expense of others.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2006:EIS,
author = "Adam Wierman",
title = "On the effect of inexact size information in size
based policies",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "21--23",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215966",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, there have been a number of scheduling
success stories in computer applications. Across a wide
array of applications, the simple heuristic of
`prioritizing small jobs' has been used to reduce user
response times with enormous success. For instance,
variants of Shortest-Remaining-Processing-Time (SRPT)
and Preemptive-Shortest-Job-First (PSJF) have been
suggested for use in web servers [5, 12], wireless
applications [6], and databases [8]. As a result of the
attention given to size based policies by computer
systems researchers, there has been a resurgence in
analytical work studying these policies. However, the
policies studied in theory, e.g. SRPT and PSJF, are
idealized versions of the policies implemented by
practitioners. In particular, the intricacies of
computer systems force the use of complex hybrid
policies in practice, though these more complex
policies are still built around the heuristic of
`prioritizing small jobs.' Thus, there exists a gap
between the results provided by theoretical research
and the needs of practitioners. This gap results from
three primary disconnects between the model studied in
theory and the needs of system designers. First, in
designing systems, the goal is not simply to provide
small response times; other performance measures are
also important. Thus, idealized policies such as SRPT
and PSJF are often tweaked by practitioners to perform
well on secondary performance measures (e.g. fairness
and slowdown) [3, 11, 12]. Second, the overhead
involved in distinguishing between an infinite number
of different priority classes typically causes system
designers to discretize policies such as SRPT and PSJF
so that they use only a small number of priority
classes (5-10) [5, 11]. Third, in many cases
information about the service demands (sizes) of jobs
is inexact. For instance, when serving static content,
web servers have exact knowledge of the sizes of the
files being served, but have inexact knowledge of
network conditions. Thus, the web server only has an
estimate of the true service demand [7, 12].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sheahan:2006:CTD,
author = "Robert Sheahan and Lester Lipsky and Pierre M. Fiorini
and S{\o}ren Asmussen",
title = "On the completion time distribution for tasks that
must restart from the beginning if a failure occurs",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "24--26",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215967",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For many systems, failure is so common that the design
choice of how to deal with it may have a significant
impact on the performance of the system. There are many
specific and distinct failure recovery schemes, but
they can be grouped into three broad classes: {\em
RESUME}, also referred to as preemptive resume (prs),
or check-pointing; {\em REPLACE}, also referred to as
preemptive repeat different (prd); and {\em RESTART},
also referred to as preemptive repeat identical (pri).
The following describes the three recovery schemes: (1)
                 {\em RESUME:\/} when a task fails, it knows exactly
where it stops, and can continue from that point when
allowed to resume; (2) {\em REPLACE:\/} given a task
fails, then when it begins processing again, it starts
with a brand new task sampled from the same task time
distribution; and, (3) {\em RESTART:\/} When a task
                 fails, it loses all that it had acquired up to that
                 point and must start anew upon continuing later.
This is distinctly different from (2) since the task
must run at least as long as it did before it failed,
whereas a new sample, selected at random, might run for
a shorter or longer time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Silveira:2006:MST,
author = "Fernando Silveira and Edmundo {de Souza e Silva}",
title = "Modeling the short-term dynamics of packet losses",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "27--29",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215968",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet loss models play an essential role in computer
networks analysis. Performance evaluation studies often
abstract the loss and delay characteristics of a path
or network with a single end-to-end analytical model.
This model should be able to represent the
characteristics of the path and accurately reproduce
the impact of delay and losses on the studied protocol
while keeping complexity low.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ott:2006:SSP,
author = "Teunis J. Ott and Jason Swanson",
title = "Stationarity of some processes in transport
protocols",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "30--32",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215969",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This note establishes stationarity of a number of
stochastic processes of interest in the study of
Transport Protocols. For many of the processes studied
in this note stationarity had been established before,
but for one class the result is new. For that class, it
was counterintuitive that stationarity was hard to
prove. This note also explains why that class offered
such stiff resistance. The stationarity is proven using
Liapunov functions, without first proving tightness by
proving boundedness of moments. After the 2006 MAMA
workshop simple conditions for existence of such
moments were obtained and were added to this note.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2006:FDT,
author = "Yuliy Baryshnikov and Ed Coffman and Jing Feng and
Vishal Misra",
title = "Free-Drop {TCP}",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "33--35",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215970",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a new class of TCP congestion control
algorithms that take a non-standard approach: instead
of modifying AIMD parameters or exploiting traffic
measurements, the new protocols modify the rule for
deciding when to cut the congestion window. The class
is defined by an additional window with a packet-count
parameter $w$; the congestion window is reduced by half
when a packet loss is detected, at time $t$ say, if and
only if there has been at least one dropped packet in
the last $w$ packet transmissions prior to time $t$. An
algorithm in the class is called {\em Free-Drop TCP},
since dropped packets are `free' (they do not cause
cuts in the window size) unless they are sufficiently
bursty. We propose this new class as a means to achieve
high utilizations in high bandwidth-delay product
networks with small buffers. We analyze a fluid model
which leads to explicit estimates of the average
throughput for small loss probabilities. We then give
the results of experiments, which show that, relative
to TCP, a family of `shifted' response functions of the
form $ O(1 / \sqrt {p} - \epsilon)$ can be obtained
over a wide range of $p$ by suitably varying $w$.
Potential costs of these increases in throughput are
                 also examined in terms of coefficients of variation and
Jain's fairness measure. The costs range from
negligible to moderate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Carofiglio:2006:ARS,
author = "G. Carofiglio and C. Chiasserini and M. Garetto and E.
Leonardi",
title = "Analysis of route stability under the random direction
mobility model",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "36--38",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215971",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we study the stability of routing paths
in a Mobile Ad-hoc Network (MANET), where links are
subject to failure due to nodes' mobility. We focus on
the Random Direction mobility model, and consider as
metrics of interest the duration and availability of
links and paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Osogami:2006:FPBb,
author = "Takayuki Osogami",
title = "Finding probably best system configurations quickly",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "39--41",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215972",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer systems often have many possible
configurations, and designing a high performance system
often requires selecting the best configuration.
Unfortunately, the performance of complex systems can
often be estimated only via simulations, or with
measurements of real systems. Since longer simulation
times are required to estimate the performance more
accurately, it is often computationally intractable to
estimate the performance of all configurations
accurately via simulations. (Measurements of real
systems can take even longer.)",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2006:AOT,
author = "David D. Yao and Heng-Qing Ye",
title = "Asymptotic optimality of threshold control in a
stochastic network based on a fixed-point
approximation",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "42--44",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215973",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In Li and Yao [5], a stochastic network with
simultaneous resource occupancy is studied, and a
threshold control policy is proposed based on a
fixed-point approximation. Here, we establish the
asymptotic optimality of this control policy under
fluid and diffusion scaling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bayati:2006:OSM,
author = "Mohsen Bayati and Mayank Sharma and Mark S.
Squillante",
title = "Optimal scheduling in a multiserver stochastic
network",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "45--47",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215974",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a fundamental scheduling problem in a
multiserver stochastic network consisting of 2 classes
of customers and 2 classes of servers. Customers of
class $k$ arrive to queue $k$ according to a Poisson
process with rate $ \lambda_k, k = 1, 2$. The service
times of class $k$ customers at class $ \ell $ servers
are i.i.d. following an exponential distribution with
mean $ \mu_{k, \ell }^{-1}, \forall k, \ell = 1, 2$,
                 where $ 0 < \mu_{1, 1}, \mu_{1, 2}, \mu_{2, 2} < \infty
                 $ and $ \mu_{2, 1} = 0$. Hence, class 1 customers can
be served at both classes of servers, but class 2
customers can only be served at class 2 servers. A FCFS
queueing discipline is employed at each queue. The
customer arrival and service processes are mutually
independent of each other and of all resource
allocation decisions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elhaddad:2006:ATS,
author = "Mahmoud Elhaddad and Rami Melhem and Taieb Znati",
title = "Analysis of a transmission scheduling algorithm for
supporting bandwidth guarantees in bufferless
networks",
journal = j-SIGMETRICS,
volume = "34",
number = "3",
pages = "48--63",
month = dec,
year = "2006",
CODEN = "????",
DOI = "https://doi.org/10.1145/1215956.1215957",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:26 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a network of bufferless packet multiplexers, the
user-perceived capacity of an ingress-egress tunnel
(connection) may degrade quickly with increasing path
length. This is due to the compounding of transmission
blocking probabilities along the path of the
connection, even when the links are not overloaded. In
such an environment, providing users (e.g., client
ISPs) with tunnels of statistically guaranteed
bandwidth may limit the network's connection-carrying
capacity. In this paper, we introduce and analyze a
transmission-scheduling algorithm that employs
randomization and traffic regulation at the ingress,
and batch scheduling at the links. The algorithm
ensures that a fraction of transmissions from each
connection is consistently subject to small blocking
probability at every link, so that these transmissions
are likely to survive long paths. For this algorithm,
we obtain tight bounds on the expectation and tail
probability of the blocking rate of any ingress-egress
connection. We compare the bounds to those obtained
using the FCFS link-scheduling rule. We find that the
proposed scheduling algorithm significantly improves
the network's connection-carrying capacity. In deriving
the desired bounds, we develop an analytic framework
for stochastically comparing network-wide routing and
bandwidth allocation scenarios with respect to blocking
in a packet multiplexer. The framework enables us to
formally characterize the routing and bandwidth
allocation scenarios that maximize the expected
blocking rate along the path of a tagged connection.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2007:F,
author = "Mor Harchol-Balter",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "2--3",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "I would like to introduce this issue by telling a
story. Sometime back in 1997, I wrote a paper on a new
idea for improving the response times of http requests
at a Web server. The idea was to schedule the HTTP
requests so as to favor requests for small files, in
accordance with the well-known scheduling policy
Shortest Remaining Processing Time (SRPT). The paper
was rejected, for many reasons, but the review that
stuck in my mind was the one that said, {\em `Why is
this person writing about scheduling? Scheduling is
dead.'\/} According to this reviewer, everything that
would ever be known about scheduling was already
described in the beautiful {\em Theory of Scheduling\/}
book, written in 1967, by Conway, Maxwell, and
Miller.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wierman:2007:FC,
author = "Adam Wierman",
title = "Fairness and classifications",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "4--12",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The growing trend in computer systems towards using
scheduling policies that prioritize jobs with small
service requirements has resulted in a new focus on the
fairness of such policies. In particular, researchers
have been interested in whether prioritizing small job
sizes results in large jobs being treated `unfairly.'
However, fairness is an amorphous concept and thus
difficult to define and study. This article provides a
short survey of recent work in this area.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boxma:2007:TS,
author = "Onno Boxma and Bert Zwart",
title = "Tails in scheduling",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "13--20",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper gives an overview of recent research on the
impact of scheduling on the tail behavior of the
response time of a job. We cover preemptive and
non-preemptive scheduling disciplines, consider
light-tailed and heavy-tailed distributions, and
discuss optimality properties. The focus is on results,
intuition and insight rather than methods and
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Biersack:2007:SP,
author = "Ernst W. Biersack and Bianca Schroeder and Guillaume
Urvoy-Keller",
title = "Scheduling in practice",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "21--28",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In queueing theory, it has been known for a long time
that the scheduling policy used in a system greatly
impacts user-perceived performance. For example, it has
been proven in the 1960's that size-based scheduling
policies that give priority to short jobs are optimal
with respect to mean response time. Yet, virtually no
systems today implement these policies. One reason is
that real systems are significantly more complex than a
theoretical M/M/1 or M/G/1 queue and it is not obvious
how to implement some of these policies in practice.
Another reason is that there is a fear that the big
jobs will `starve', or be treated unfairly as compared
to Processor-Sharing (PS). In this article we show,
using two important real world applications, that
size-based scheduling can be used in practice to
greatly improve mean response times in real systems,
without causing unfairness or starvation. The two
applications we consider are connection scheduling in
web servers and packet scheduling in network routers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2007:SNT,
author = "Thomas Bonald and James Roberts",
title = "Scheduling network traffic",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "29--35",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We discuss the potential of packet scheduling as a
means to control traffic and improve performance for
both wired and wireless links. Using simple queuing
models that take into account the random nature of
traffic, we draw practical conclusions about the
expected gains and limits of scheduling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bandwidth sharing; scheduling; service
differentiation",
}
@Article{Aalto:2007:BPS,
author = "Samuli Aalto and Urtzi Ayesta and Sem Borst and Vishal
Misra and Rudesindo N{\'u}{\~n}ez-Queija",
title = "Beyond processor sharing",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "36--43",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While the (Egalitarian) Processor-Sharing (PS)
discipline offers crucial insights in the performance
of fair resource allocation mechanisms, it is
inherently limited in analyzing and designing
differentiated scheduling algorithms such as Weighted
Fair Queueing and Weighted Round-Robin. The
Discriminatory Processor-Sharing (DPS) and Generalized
Processor-Sharing (GPS) disciplines have emerged as
natural generalizations for modeling the performance of
such service differentiation mechanisms. A further
extension of the ordinary PS policy is the Multilevel
Processor-Sharing (MLPS) discipline, which has captured
a pivotal role in the analysis, design and
implementation of size-based scheduling strategies. We
review various key results for DPS, GPS and MLPS
models, highlighting to what extent these disciplines
inherit desirable properties from ordinary PS or are
capable of delivering service differentiation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic analysis; delay minimization;
discriminatory processor sharing; generalized processor
sharing; in-sensitivity; multilevel processor sharing;
queue length; service differentiation; size-based
scheduling; slowdown; sojourn time; workload",
}
@Article{Squillante:2007:SAM,
author = "Mark S. Squillante",
title = "Stochastic analysis of multiserver systems",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "44--51",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an overview of research in the
stochastic analysis of multiserver systems, where
                 scheduling often plays a critical role. Our primary
focus is on the stochastic analysis and optimization of
multiserver systems in general, since most of this
research directly investigates scheduling issues and
all of this research provides the methods and results
that have been and will continue to be used to study
existing and future multiserver scheduling issues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pruhs:2007:COS,
author = "Kirk Pruhs",
title = "Competitive online scheduling for server systems",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "52--58",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243411",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Our goal here is to illustrate the competitive online
scheduling research community's approach to online
server scheduling problems by enumerating some of the
results obtained for problems related to response and
slowdown, and by explaining some of the standard
analysis techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2007:AMJ,
author = "Hui Li and Michael Muskulus",
title = "Analysis and modeling of job arrivals in a production
grid",
journal = j-SIGMETRICS,
volume = "34",
number = "4",
pages = "59--70",
month = mar,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1243401.1243402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:27 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an initial analysis of job
arrivals in a production data-intensive Grid and
investigate several traffic models for the interarrival
time processes. Our analysis focuses on the heavy-tail
behavior and autocorrelations, and the modeling is
carried out at three different levels: {\em Grid,
Virtual Organization (VO)}, and {\em region}. A set of
{\em $m$-state Markov modulated Poisson processes
(MMPP)\/} is investigated, while {\em Poisson
processes\/} and {\em hyperexponential renewal
processes\/} are evaluated for comparison studies. We
apply the {\em transportation distance\/} metric from
dynamical systems theory to further characterize the
differences between the data trace and the simulated
time series, and estimate errors by {\em
bootstrapping}. The experimental results show that
MMPPs with a certain number of states are successful to
a certain extent in simulating the job traffic at
different levels, fitting both the interarrival time
distribution and the autocorrelation function. However,
MMPPs are not able to match the autocorrelations for
certain VOs, in which strong deterministic
semi-periodic patterns are observed. These patterns are
further characterized using different representations.
Future work is needed to model both deterministic and
stochastic components in order to better capture the
correlation structure in the series.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kadayif:2007:MID,
author = "Ismail Kadayif and Mahmut Kandemir",
title = "Modeling and improving data cache reliability: 1",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "12--12",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Soft errors arising from energetic particle strikes
pose a significant reliability concern for computing
systems, especially for those running in noisy
environments. Technology scaling and aggressive leakage
control mechanisms make the problem caused by these
transient errors even more severe. Therefore, it is
very important to employ reliability enhancing
mechanisms in processor/memory designs to protect them
against soft errors. To do so, we first need to model
soft errors, and then study cost/reliability tradeoffs
among various reliability enhancing techniques based on
the model so that system requirements could be
met.\par
Since cache memories take the largest fraction of
on-chip real estate today and their share is expected
to continue to grow in future designs, they are more
vulnerable to soft errors, as compared to many other
components of a computing system. In this paper, we
first focus on a soft error model for L1 data caches,
and then explore different reliability enhancing
mechanisms. More specifically, we define a metric
called AVFC (Architectural Vulnerability Factor for
Caches), which represents the probability with which a
fault in the cache can be visible in the final output
of the program. Based on this model, we then propose
three architectural schemes for improving reliability
in the existence of soft errors. Our first scheme
prevents an error from propagating to the lower levels
in the memory hierarchy by not forwarding the
unmodified data words of a dirty cache block to the L2
cache when the dirty block is to be replaced. The
second scheme proposed selectively invalidates cache
blocks to reduce their vulnerable periods, decreasing
their chances of catching any soft errors. Based on the
AVFC metric, our experimental results show that these
two schemes are very effective in alleviating soft
errors in the L1 data cache. Specifically, by using our
first scheme, it is possible to improve the AVFC metric
by 32\% without any performance loss. On the other
hand, the second scheme enhances the AVFC metric
between 60\% and 97\%, at the cost of a performance
degradation which varies from 0\% to 21.3\%, depending
on how aggressively the cache blocks are invalidated.
To reduce the performance overhead caused by cache
block invalidation, we also propose a third scheme
which tries to bring a fresh copy of the invalidated
block into the cache via prefetching. Our experimental
results indicate that, this scheme can reduce the
performance overheads to less than 1\% for all
applications in our experimental suite, at the cost of
giving up a tolerable portion of the reliability
enhancement the second scheme achieves.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data caches; data integrity; reliability; soft errors;
vulnerability factors",
}
@Article{Gulati:2007:PAC,
author = "Ajay Gulati and Arif Merchant and Peter J. Varman",
title = "{pClock}: an arrival curve based approach for {QoS}
guarantees in shared storage systems",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "13--24",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254885",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Storage consolidation is becoming an attractive
paradigm for data organization because of the economies
of sharing and the ease of centralized management.
However, sharing of resources is viable only if
applications can be isolated from each other. This work
targets the problem of providing performance guarantees
to an application irrespective of the behavior of other
workloads. Application requirements are represented in
terms of the average throughput, latency and maximum
burst size. Most earlier schemes only do weighted
bandwidth allocation; schemes that provide control of
latency either cannot handle bursts or penalize
applications for their own prior behavior, such as
using spare capacity.\par
Our algorithm $p$Clock is based on arrival curves that
intuitively capture the bandwidth and burst
requirements of applications. We show analytically that
an application following its arrival curve never misses
its deadline. We have implemented $p$Clock both in
DiskSim and as a module in the Linux kernel 2.6. Our
evaluation shows three important features of $p$Clock:
(1) benefits over existing algorithms; (2) efficient
performance isolation and burst handling; and (3) the
ability to allocate spare capacity to either speed up
some applications or to a background utility, such as
backup. $p$Clock can be efficiently implemented in a
system without much overhead.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "burst handling; fair scheduling; QoS; real time
guarantees; resource allocation; storage performance
virtualization",
}
@Article{Iyer:2007:QPA,
author = "Ravi Iyer and Li Zhao and Fei Guo and Ramesh Illikkal
and Srihari Makineni and Don Newell and Yan Solihin and
Lisa Hsu and Steve Reinhardt",
title = "{QoS} policies and architecture for cache\slash memory
in {CMP} platforms",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "25--36",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254886",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As we enter the era of CMP platforms with multiple
threads/cores on the die, the diversity of the
simultaneous workloads running on them is expected to
increase. The rapid deployment of virtualization as a
means to consolidate workloads on to a single platform
is a prime example of this trend. In such scenarios,
the quality of service (QoS) that each individual
workload gets from the platform can widely vary
depending on the behavior of the simultaneously running
workloads. While the number of cores assigned to each
workload can be controlled, there is no hardware or
software support in today's platforms to control
allocation of platform resources such as cache space
and memory bandwidth to individual workloads. In this
paper, we propose a QoS-enabled memory architecture for
CMP platforms that addresses this problem. The
QoS-enabled memory architecture enables more cache
resources (i.e. space) and memory resources (i.e.
bandwidth) for high priority applications based on
guidance from the operating environment. The
architecture also allows dynamic resource reassignment
during run-time to further optimize the performance of
the high priority application with minimal degradation
to low priority. To achieve these goals, we will
describe the hardware/software support required in the
platform as well as the operating environment (O/S and
virtual machine monitor). Our evaluation framework
consists of detailed platform simulation models and a
QoS-enabled version of Linux. Based on evaluation
experiments, we show the effectiveness of a QoS-enabled
architecture and summarize key findings/trade-offs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache/memory; CMP; performance; QoS; quality of
service; resource sharing principles; service level
agreements",
}
@Article{Mesnier:2007:MRF,
author = "Michael P. Mesnier and Matthew Wachs and Raja R.
Sambasivan and Alice X. Zheng and Gregory R. Ganger",
title = "Modeling the relative fitness of storage",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "37--48",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254887",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Relative fitness is a new black-box approach to
modeling the performance of storage devices. In
contrast with an absolute model that predicts the
performance of a workload on a given storage device, a
relative fitness model predicts performance {\em
differences\/} between a pair of devices. There are two
primary advantages to this approach. First, because a
relative fitness model is constructed for a device pair,
the application-device feedback of a closed workload
can be captured (e.g., how the I/O arrival rate changes
as the workload moves from device A to device B).
Second, a relative fitness model allows performance and
resource utilization to be used in place of workload
characteristics. This is beneficial when workload
characteristics are difficult to obtain or concisely
express (e.g., rather than describe the spatio-temporal
characteristics of a workload, one could use the
observed cache behavior of device A to help predict the
performance of B).\par
This paper describes the steps necessary to build a
relative fitness model, with an approach that is
general enough to be used with any black-box modeling
technique. We compare relative fitness models and
absolute models across a variety of workloads and
storage devices. On average, relative fitness models
predict bandwidth and throughput within 10-20\% and can
reduce prediction error by as much as a factor of two
when compared to absolute models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "black-box; CART; modeling; storage",
}
@Article{Wen:2007:FFI,
author = "Zhihua Wen and Sipat Triukose and Michael Rabinovich",
title = "Facilitating focused {Internet} measurements",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "49--60",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254889",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes our implementation of and initial
experiences with DipZoom (for `Deep Internet
Performance Zoom'), a novel approach to provide
focused, on-demand Internet measurements. Unlike
existing approaches that face a difficult challenge of
building a measurement platform with sufficiently
diverse measurements and measuring hosts, DipZoom
implements a matchmaking service instead, which uses
P2P concepts to bring together experimenters in need of
measurements with external measurement providers.
DipZoom offers the following two main contributions.
First, since it is just a facilitator for an open
community of participants, it promises unprecedented
availability of diverse measurements and measuring
points. Second, it can be used as a veneer over
existing measurement platforms, automating the planning
and execution of complex measurements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet measurement infrastructures; network
measurements; peer-to-peer systems",
}
@Article{Huang:2007:DND,
author = "Yiyi Huang and Nick Feamster and Anukool Lakhina and
Jim (Jun) Xu",
title = "Diagnosing network disruptions with network-wide
analysis",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "61--72",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254890",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "To maintain high availability in the face of changing
network conditions, network operators must quickly
detect, identify, and react to events that cause
network disruptions. One way to accomplish this goal is
to monitor routing dynamics, by analyzing routing
update streams collected from routers. Existing
monitoring approaches typically treat streams of
routing updates from different routers as independent
signals, and report only the `loud' events (i.e.,
events that involve large volume of routing messages).
In this paper, we examine BGP routing data from all
routers in the Abilene backbone for six months and
correlate them with a catalog of all known disruptions
to its nodes and links. We find that many important
events are not loud enough to be detected from a single
stream. Instead, they become detectable only when
multiple BGP update streams are simultaneously
examined. This is because routing updates exhibit {\em
network-wide\/} dependencies.\par
This paper proposes using network-wide analysis of
routing information to diagnose (i.e., detect and
identify) network disruptions. To detect network
disruptions, we apply a multivariate analysis technique
on dynamic routing information, (i.e., update traffic
from all the Abilene routers) and find that this
technique can detect every reported disruption to nodes
and links within the network with a low rate of false
alarms. To identify the type of disruption, we jointly
analyze both the network-wide static configuration and
details in the dynamic routing updates; we find that
our method can correctly explain the scenario that
caused the disruption. Although much work remains to
make network-wide analysis of routing data
operationally practical, our results illustrate the
importance and potential of such an approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "anomaly detection; network management; statistical
inference",
}
@Article{Pucha:2007:UND,
author = "Himabindu Pucha and Ying Zhang and Z. Morley Mao and
Y. Charlie Hu",
title = "Understanding network delay changes caused by routing
events",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "73--84",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254891",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network delays and delay variations are two of the
most important network performance metrics directly
impacting real-time applications such as voice over IP
and time-critical financial transactions. This
importance is illustrated by past work on understanding
the delay constancy of Internet paths and recent work
on predicting network delays using virtual coordinate
systems. Merely understanding currently observed delays
is insufficient, as network performance can degrade not
only due to traffic variability but also as a result of
routing changes. Unfortunately this latter effect so
far has been ignored in understanding and predicting
delay related performance metrics of Internet paths.
Our work is the first to address this shortcoming by
systematically analyzing changes in network delays and
jitter of a diverse and comprehensive set of Internet
paths. Using empirical measurements, we illustrate that
routing changes can result in roundtrip delay increase
of converged paths by more than 1 second. Surprisingly,
intradomain routing changes can also cause such large
delay increase.\par
Given these observations, we develop a framework to
analyze in detail the impact of routing changes on
network delays between end-hosts. Using topology
information and properties associated with routing
changes, we explain the causes for observed delay
fluctuations and more importantly identify routing
changes that lead to predictable effects on
delay-related metrics. Using our framework, we study
the predictability of delay and jitter changes in
response to both passively observed interdomain and
actively measured intradomain routing changes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network delay changes; network jitter changes; routing
dynamics; routing events",
}
@Article{Kashyap:2007:TPR,
author = "Abhishek Kashyap and Sudipta Sengupta and Randeep
Bhatia and M. Kodialam",
title = "Two-phase routing, scheduling and power control for
wireless mesh networks with variable traffic",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "85--96",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254893",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of joint routing, scheduling
and transmission power assignment in multi-hop wireless
mesh networks with unknown traffic. We assume the
traffic is unknown, but the traffic matrix, which
specifies the traffic load between every
source-destination pair in the network, always lies
inside a polytope defined by {\em hose\/} model
constraints. The objective is to minimize the maximum
of the total transmission power in the network over all
traffic matrices in a given polytope. We propose
efficient algorithms that compute a two-phase routing,
schedule and power assignment, and prove the solution
to be a 3-approximation with respect to an optimal
two-phase routing, scheduling and power assignment. We
show via extensive simulations that the proposed
algorithm has good performance at its worst operating
traffic compared to an algorithm optimized for that
traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "power control; scheduling; two-phase routing; variable
traffic; wireless mesh networks",
}
@Article{Mirza:2007:MLA,
author = "Mariyam Mirza and Joel Sommers and Paul Barford and
Xiaojin Zhu",
title = "A machine learning approach to {TCP} throughput
prediction",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "97--108",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254894",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TCP {\em throughput prediction\/} is an important
capability in wide area overlay and multi-homed
networks where multiple paths may exist between data
sources and receivers. In this paper we describe a new,
lightweight method for TCP throughput prediction that
can generate accurate forecasts for a broad range of
file sizes and path conditions. Our method is based on
Support Vector Regression modeling that uses a
combination of prior file transfers and measurements of
simple path properties. We calibrate and evaluate the
capabilities of our throughput predictor in an
extensive set of lab-based experiments where ground
truth can be established for path properties using
highly accurate passive measurements. We report the
performance for our method in the ideal case of using
our passive path property measurements over a range of
test configurations. Our results show that for bulk
transfers in heavy traffic, TCP throughput is predicted
within 10\% of the actual value 87\% of the time,
representing nearly a 3-fold improvement in accuracy
over prior history-based methods. In the same lab
environment, we assess our method using less accurate
active probe measurements of path properties, and show
that predictions can be made within 10\% of the actual
value nearly 50\% of the time over a range of file
sizes and traffic conditions. This result represents
approximately a 60\% improvement over history-based
methods with a much lower impact on end-to-end paths.
Finally, we implement our predictor in a tool called
{\em PathPerf\/} and test it in experiments conducted
on wide area paths. The results demonstrate that {\em
PathPerf\/} predicts TCP throughput accurately over a
variety of paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active measurements; machine learning; support vector
regression; TCP throughput prediction",
}
@Article{Ringberg:2007:SPT,
author = "Haakon Ringberg and Augustin Soule and Jennifer
Rexford and Christophe Diot",
title = "Sensitivity of {PCA} for traffic anomaly detection",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "109--120",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254895",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Detecting anomalous traffic is a crucial part of
managing IP networks. In recent years, network-wide
anomaly detection based on Principal Component Analysis
(PCA) has emerged as a powerful method for detecting a
wide variety of anomalies. We show that tuning PCA to
operate effectively in practice is difficult and
requires more robust techniques than have been
presented thus far. We analyze a week of network-wide
traffic measurements from two IP backbones (Abilene and
Geant) across three different traffic aggregations
(ingress routers, OD flows, and input links), and
conduct a detailed inspection of the feature time
series for each suspected anomaly. Our study identifies
and evaluates four main challenges of using PCA to
detect traffic anomalies: (i) the false positive rate
is very sensitive to small differences in the number of
principal components in the normal subspace, (ii) the
effectiveness of PCA is sensitive to the level of
aggregation of the traffic measurements, (iii) a large
anomaly may inadvertently pollute the normal subspace,
(iv) correctly identifying which flow triggered the
anomaly detector is an inherently challenging
problem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network traffic analysis; principal component
analysis; traffic engineering",
}
@Article{Lee:2007:BCS,
author = "Seungjoon Lee and Dave Levin and Vijay Gopalakrishnan
and Bobby Bhattacharjee",
title = "Backbone construction in selfish wireless networks",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "121--132",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254896",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a protocol to construct routing backbones
in wireless networks composed of selfish participants.
Backbones are inherently cooperative, so constructing
them in selfish environments is particularly difficult;
participants want a backbone to exist (so others relay
their packets) but do not want to join the backbone (so
they do not have to relay packets for others).\par
We model the wireless backbone as a public good and use
impatience as an incentive for cooperation. To
determine if and when to donate to this public good,
each participant calculates how patient it should be in
obtaining the public good. We quantify patience using
the Volunteer's Timing Dilemma (VTD), which we extend
to general multihop network settings. Using our
generalized VTD analysis, each node individually
computes as its dominant strategy the amount of time to
wait before joining the backbone. We evaluate our
protocol using both simulations and an implementation.
Our results show that, even though participants in our
system deliberately wait before volunteering, a
backbone is formed quickly. Further, the quality of the
backbone (such as the size and resulting network
lifetime) is comparable to that of existing backbone
protocols that assume altruistic behavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "incentives; public good; selfish network; volunteer's
dilemma; wireless backbone",
}
@Article{Xia:2007:SFJ,
author = "Cathy H. Xia and Zhen Liu and Don Towsley and Marc
Lelarge",
title = "Scalability of fork\slash join queueing networks with
blocking",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "133--144",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254898",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates how the throughput of a
general fork-join queueing network with blocking
behaves as the number of nodes increases to infinity
while the processing speed and buffer space of each
node stay unchanged. The problem is motivated by
applications arising from distributed systems and
computer networks. One example is large-scale
distributed stream processing systems where TCP is used
as the transport protocol for data transfer in between
processing components. Other examples include reliable
multicast in overlay networks, and reliable data
transfer in ad hoc networks. Using an analytical
approach, the paper establishes bounds on the
asymptotic throughput of such a network. For a subclass
of networks which are balanced, we obtain sufficient
conditions under which the network stays scalable in
the sense that the throughput is lower bounded by a
positive constant as the network size increases.
Necessary conditions of throughput scalability are
derived for general networks. The special class of
series-parallel networks is then studied in greater
detail, where the asymptotic behavior of the throughput
is characterized.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic analysis; blocking; fork and join; queueing
networks; scalability; throughput",
}
@Article{Osogami:2007:OSC,
author = "Takayuki Osogami and Sei Kato",
title = "Optimizing system configurations quickly by guessing
at the performance",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "145--156",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254899",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of a Web system can be greatly
improved by tuning its configuration parameters.
However, finding the optimal configuration has been a
time-consuming task due to the long measurement time
needed to evaluate the performance of a given
configuration. We propose an algorithm, which we refer
to as Quick Optimization via Guessing (QOG), that
quickly selects one of nearly best configurations with
high probability. The key ideas in QOG are (i) the
measurement of a configuration is terminated as soon as
the configuration is found to be suboptimal, and (ii)
the performance of a configuration is guessed at based
on the measured similar configurations, so that the
better configurations are more likely to be measured
before the others. If the performance of a good
configuration has been measured, a poor configuration
will be quickly found to be suboptimal with short
measurement time. We apply QOG to optimizing the
configuration of a real Web system, and find that QOG
can drastically reduce the total measurement time
needed to select the best configuration. Our
experiments also illuminate several interesting
properties of QOG specifically when it is applied to
optimizing Web systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "configuration parameters; performance optimization;
ranking and selection; regression; web system",
}
@Article{Wang:2007:SSR,
author = "Zhe Wang and Wei Dong and William Josephson and Qin Lv
and Moses Charikar and Kai Li",
title = "Sizing sketches: a rank-based analysis for similarity
search",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "157--168",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254900",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sketches are compact data structures that can be used
to estimate properties of the original data in building
large-scale search engines and data analysis systems.
Recent theoretical and experimental studies have shown
that sketches constructed from feature vectors using
randomized projections can effectively approximate L1
distance on the feature vectors with the Hamming
distance on their sketches. Furthermore, such sketches
can achieve good filtering accuracy while reducing the
metadata space requirement and speeding up similarity
searches by an order of magnitude. However, it is not
clear how to choose the size of the sketches since it
depends on data type, dataset size, and desired
filtering quality. In real systems designs, it is
necessary to understand how to choose sketch size
without the dataset, or at least without the whole
dataset.\par
This paper presents an analytical model and
experimental results to help system designers make such
design decisions. We present a rank-based filtering
model that describes the relationship between sketch
size and data set size based on the dataset distance
distribution. Our experimental results with several
datasets including images, audio, and 3D shapes show
that the model yields good, conservative predictions.
We show that the parameters of the model can be set
with a small sample data set and the resulting model
can make good predictions for a large dataset. We
illustrate how to apply the approach with a concrete
example.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "feature-rich data; similarity search; sketch",
}
@Article{Park:2007:MEP,
author = "Soyeon Park and Weihang Jiang and Yuanyuan Zhou and
Sarita Adve",
title = "Managing energy-performance tradeoffs for
multithreaded applications on multiprocessor
architectures",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "169--180",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254902",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In modern computers, non-performance metrics such as
energy consumption have become increasingly important,
requiring tradeoff with performance. A recent work has
proposed performance-guaranteed energy management, but
it is designed specifically for sequential applications
and cannot be used for a large class of multithreaded
applications running on high end computers and data
servers.\par
To address the above problem, this paper makes the
first attempt to provide performance-guaranteed energy
management for multithreaded applications on
multiprocessor architectures. We first conduct a
comprehensive study on the effects of energy adaptation
on thread synchronizations and show that a
multithreaded application suffers from not only local
slowdowns due to energy adaptation, but also
significant slowdowns propagated from other threads
because of synchronization. Based on these findings, we
design three Synchronization-Aware (SA) algorithms, LWT
(Lock Waiting Time-based), CSL (Critical Section
Length-based) and ODP (Operation Delay
Propagation-based) algorithms, to estimate the energy
adaptation-induced slowdowns on each thread. The local
slowdowns are then combined across multiple threads via
three aggregation methods (MAX, AVG and SUM) to
estimate the overall application slowdown.\par
We evaluate our methods using a large multithreaded
commercial application, IBM DB2 with
industrial-strength online transaction processing
(OLTP) workloads, and six SPLASH parallel scientific
applications. Our experimental results show that LWT
combined with the MAX aggregation method not only
controls the performance slowdown within the specified
limits but also conserves the most energy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy and performance tradeoffs; low power design;
memory energy management; multithreaded applications",
}
@Article{Cvetkovski:2007:AAC,
author = "Andrej Cvetkovski",
title = "An algorithm for approximate counting using limited
memory resources",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "181--190",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254903",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a randomized algorithm for
approximate counting that preserves the same modest
memory requirements of log(log n) bits per counter as
the approximate counting algorithm introduced in the
seminal paper of R. Morris (1978), and in addition, is
characterized by (i) lower expected number of memory
accesses and (ii) lower standard error on more than 99
percent of its counting range. An exact analysis of the
relevant statistical properties of the algorithm is
carried out. Performance evaluation via simulations is
also provided to validate the presented
theory.\par
Given its properties, the presented algorithm is
suitable as a basic building block of data streaming
applications having a large number of simultaneous
counters and/or operating at very high speeds. As such,
it is applicable to a wide range of measurement and
monitoring operations, including performance monitoring
of communication hardware, measurements for
optimization in large database systems, and gathering
statistics for data compression.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate counting; data streaming; network
monitoring",
}
@Article{Lee:2007:SDN,
author = "Eric S. Lee and Thom Whalen",
title = "Synthetic designs: a new form of true experimental
design for use in information systems development",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "191--202",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254904",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer scientists and software engineers seldom rely
on using experimental methods despite frequent calls to
do so. The problem may lie with the shortcomings of
traditional experimental methods. We introduce a new
form of experimental designs, synthetic designs, which
address these shortcomings. Compared with classical
experimental designs (between-subjects,
within-subjects, and matched-subjects), synthetic
designs can offer substantial reductions in sample
sizes, cost, time and effort expended, increased
statistical power, and fewer threats to validity
(internal, external, and statistical conclusion). This
new design is a variation of within-subjects design in
which each system user serves in only a single
treatment condition. System performance scores for all
other treatment conditions are derived synthetically
without repeated testing of each subject. This design,
though not applicable in all situations, can be used in
the development and testing of some computer systems
provided that user behavior is unaffected by the
version of computer system being used. We justify
synthetic designs on three grounds: this design has
been used successfully in the development of
computerized mug shot systems, showing marked
advantages over traditional designs; a detailed
comparison with traditional designs showing their
advantages on 17 of the 18 criteria considered; and an
assessment showing these designs satisfy all the
requirements of true experiments (albeit in a novel
way).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "experimental designs; synthetic experimental designs",
}
@Article{Feng:2007:PUP,
author = "Hanhua Feng and Vishal Misra and Dan Rubenstein",
title = "{PBS}: a unified priority-based scheduler",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "203--214",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254906",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Blind scheduling policies schedule tasks without
knowledge of the tasks' remaining processing times.
Existing blind policies, such as FCFS, PS, and LAS,
have proven useful in network and operating system
applications, but each policy has a separate, vastly
differing description, leading to separate and distinct
implementations. This paper presents the design and
implementation of a configurable blind scheduler that
contains a continuous, tunable parameter. By merely
changing the value of this parameter, the scheduler's
policy exactly emulates or closely approximates several
existing standard policies. Other settings enable
policies whose behavior is a hybrid of these standards.
We demonstrate the practical benefits of such a {\em
configurable\/} scheduler by implementing it into the
Linux operating system. We show that we can emulate the
behavior of Linux's existing, more complex scheduler
with a single (hybrid) setting of the parameter. We
also show, using synthetic workloads, that the best
value for the tunable parameter is not unique, but
depends on distribution of the size of tasks arriving
to the system. Finally, we use our formulation of the
configurable scheduler to contrast the behavior of
various blind schedulers by exploring how various
properties of the scheduler change as we vary our
scheduler's tunable parameter.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "FCFS; LAS; Linux; PBS; queueing systems; scheduling",
}
@Article{Jelenkovic:2007:ASC,
author =       "Predrag R. Jelenkovi{\'c} and Xiaozhu Kang and Jian Tan",
title = "Adaptive and scalable comparison scheduling",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "215--226",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254907",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Shortest Remaining Processing Time (SRPT)
scheduling discipline is optimal and its superior
performance, compared with the policies that do not use
the knowledge of job sizes, can be quantified using
mean-value analysis as well as our new asymptotic
distribution limits for the relatively smaller
heavy-tailed jobs. However, the main difficulty in
implementing SRPT in large practical systems, e.g., Web
servers, is that its complexity grows with the number
of jobs in the queue. Hence, in order to lower the
complexity, it is natural to approximate SRPT by
grouping the arrivals into a fixed (small) number of
classes containing jobs of approximately equal size and
then serve the classes of smaller jobs with higher
priorities.\par
In this paper, we design a novel adaptive grouping
mechanism based on relative size comparison of a newly
arriving job to the preceding $m$ arrivals.
Specifically, if the newly arriving job is smaller than
$k$ and larger than $ m - k$ of the previous $m$ jobs,
it is routed into class $k$. The excellent performance
of this mechanism, even for a small number of classes $
m + 1$, is demonstrated using both the asymptotic
queueing analysis under heavy tails and extensive
simulations. We also discuss refinements of the
comparison grouping mechanism that improve the accuracy
of job classification at the expense of a small
additional complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adaptive thresholds; comparison scheduling; M/G/1;
scalability",
}
@Article{Bhadra:2007:OCP,
author = "Sandeep Bhadra and Yingdong Lu and Mark S.
Squillante",
title = "Optimal capacity planning in stochastic loss networks
with time-varying workloads",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "227--238",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254909",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a capacity planning optimization problem
in a general theoretical framework that extends the
classical Erlang loss model and related stochastic loss
networks to support time-varying workloads. The time
horizon consists of a sequence of coarse time
intervals, each of which involves a stochastic loss
network under a fixed multi-class workload that can
change in a general manner from one interval to the
next. The optimization problem consists of determining
the capacities for each time interval that maximize a
utility function over the entire time horizon, finite
or infinite, where rewards gained from servicing
customers are offset by penalties associated with
deploying capacities in an interval and with changing
capacities among intervals. We derive a state-dependent
optimal policy within the context of a particular
limiting regime of the optimization problem, and we
prove this solution to be asymptotically optimal. Then,
under fairly mild conditions, we prove that a similar
structural property holds for the optimal solution of
the original stochastic optimization problem, and we
show how the optimal capacities comprising this
solution can be efficiently computed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic optimality; capacity planning; Erlang
fixed-point approximation; Erlang loss formula;
stochastic dynamic programming; stochastic loss
networks; time-varying workloads",
}
@Article{Liu:2007:FLS,
author = "Jiaping Liu and Alexandre Prouti{\`e}re and Yung Yi
and Mung Chiang and H. Vincent Poor",
title = "Flow-level stability of data networks with non-convex
and time-varying rate regions",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "239--250",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254910",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we characterize flow-level stochastic
stability for networks with non-convex or time-varying
rate regions under resource allocation based on utility
maximization. Similar to prior works on flow-level
stability, we consider exogenous data arrivals with
finite workloads. However, to model many realistic
situations, the rate region, which constrains the
feasibility of resource allocation, may be either
non-convex or time-varying. When the rate region is
fixed but non-convex, we derive sufficient and
necessary conditions for stability, which coincide when
the set of allocated rate vectors has continuous
contours. When the rate region is time-varying
according to some stationary, ergodic process, we
derive the precise stability region. In both cases, the
size of the stability region depends on the resource
allocation policy, in particular, on the fairness
parameter in $ \alpha $-fair utility maximization.
This is in sharp contrast with the substantial existing
literature on stability under fixed and convex rate
regions, in which the stability region coincides with
the rate region for many utility-based resource
allocation schemes, independently of the value of the
fairness parameter. We further investigate the tradeoff
between fairness and stability when rate region is
non-convex or time-varying. Numerical examples of both
wired and wireless networks are provided to illustrate
the new stability regions and tradeoffs proved in the
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fairness; network utility maximization; resource
allocation; stability",
}
@Article{Smirni:2007:FDP,
author = "Evgenia Smirni and Frederica Darema and Albert
Greenberg and Adolfy Hoisie and Don Towsley",
title = "Future directions in performance evaluation research",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "251--252",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254912",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dong:2007:WSP,
author = "Qunfeng Dong and Suman Banerjee and Jia Wang and
Dheeraj Agrawal",
title = "Wire speed packet classification without {TCAMs}: a
few more registers (and a bit of logic) are enough",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "253--264",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254914",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Packet classification is the foundation of many
Internet functions such as QoS and security. A long
thread of research has proposed efficient
software-based solutions to this problem. Such software
solutions are attractive because they require cheap
memory systems for implementation, thus bringing down
the overall cost of the system. In contrast,
hardware-based solutions use more expensive memory
systems, e.g., TCAMs, but are often preferred by router
vendors for their faster classification speeds. The
goal of this paper is to find a `best-of-both-worlds'
solution --- a solution that incurs the cost of a
software-based system and has the speed of a
hardware-based one. Our proposed solution, called {\em
smart rule cache\/} achieves this goal by using minimal
hardware --- a few additional registers --- to cache
{\em evolving\/} rules which preserve classification
semantics, and additional logic to match incoming
packets to these rules. Using real traffic traces and
real rule sets from a tier-1 ISP, we show such a setup
is sufficient to achieve very high hit ratios for fast
classification in hardware. Cache miss ratios are 2--4
orders of magnitude lower than flow cache schemes.
Given its low cost and good performance, we believe our
solution may create significant impact on current
industry practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; rule cache; rule evolution",
}
@Article{Hirzel:2007:DLO,
author = "Martin Hirzel",
title = "Data layouts for object-oriented programs",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "265--276",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254915",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Object-oriented programs rely heavily on objects and
pointers, making them vulnerable to slow downs from
cache and TLB misses. The cache and TLB behavior
depends on the data layout of objects in memory. There
are many possible data layouts with different impacts
on performance, but it is not known which perform
better. This paper presents a novel framework for
evaluating data layouts. The framework both makes
implementing many layouts easy, and enables performance
measurements of real programs using a product Java
virtual machine on stock hardware. This is achieved by
sorting objects during copying garbage collection;
outside of garbage collection, program performance is
solely determined by the data layout that the sort key
implements. This paper surveys and evaluates 10 common
data layouts with 32 realistic benchmark programs
running on 3 different hardware configurations. The
results confirm the importance of data layouts for
program performance, and show that almost all layouts
yield the best performance for some programs and the
worst performance for others.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache; data layout; data placement; GC; hardware
performance counters; memory subsystem; spatial
locality; TLB",
}
@Article{Hao:2007:BHA,
author = "Fang Hao and Murali Kodialam and T. V. Lakshman",
title = "Building high accuracy {Bloom} filters using
partitioned hashing",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "277--288",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254916",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The growing importance of operations such as
packet-content inspection, packet classification based
on non-IP headers, maintaining flow-state, etc. has led
to increased interest in the networking applications of
Bloom filters. This is because Bloom filters provide a
relatively easy method for hardware implementation of
set-membership queries. However, the tradeoff is that
Bloom filters only provide a probabilistic test and
membership queries can result in false positives.
Ideally, we would like this false positive probability
to be very low. The main contribution of this paper is
a method for significantly reducing this false positive
probability in comparison to existing schemes. This is
done by developing a {\em partitioned hashing\/} method
which results in a choice of hash functions that set
far fewer bits in the Bloom filter bit vector than
would be the case otherwise. This lower fill factor of
the bit vector translates to a much lower false
positive probability. We show experimentally that this
improved choice can result in as much as a ten-fold
increase in accuracy over standard Bloom filters. We
also show that the scheme performs much better than
other proposed schemes for improving Bloom filters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bloom filter; hashing",
}
@Article{Bairavasundaram:2007:ALS,
author = "Lakshmi N. Bairavasundaram and Garth R. Goodson and
Shankar Pasupathy and Jiri Schindler",
title = "An analysis of latent sector errors in disk drives",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "289--300",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254917",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The reliability measures in today's disk drive-based
storage systems focus predominantly on protecting
against complete disk failures. Previous disk
reliability studies have analyzed empirical data in an
attempt to better understand and predict disk failure
rates. Yet, very little is known about the incidence of
latent sector errors i.e., errors that go undetected
until the corresponding disk sectors are
accessed.\par
Our study analyzes data collected from production
storage systems over 32 months across 1.53 million
disks (both nearline and enterprise class). We analyze
factors that impact latent sector errors, observe
trends, and explore their implications on the design of
reliability mechanisms in storage systems. To the best
of our knowledge, this is the first study of such large
scale --- our sample size is at least an order of magnitude
larger than previously published studies --- and the first
one to focus specifically on latent sector errors and
their implications on the design and reliability of
storage systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk drive reliability; latent sector errors; MTTDL",
}
@Article{Legout:2007:CSI,
author = "Arnaud Legout and Nikitas Liogkas and Eddie Kohler and
Lixia Zhang",
title = "Clustering and sharing incentives in {BitTorrent}
systems",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "301--312",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254919",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Peer-to-peer protocols play an increasingly
instrumental role in Internet content distribution. It
is therefore important to gain a complete understanding
of how these protocols behave in practice and how their
operating parameters affect overall system performance.
This paper presents the first detailed experimental
investigation of the peer selection strategy in the
popular BitTorrent protocol. By observing more than 40
nodes in instrumented private torrents, we validate
three protocol properties that, though believed to
hold, have not been previously demonstrated
experimentally: the clustering of similar-bandwidth
peers, the effectiveness of BitTorrent's sharing
incentives, and the peers' high uplink utilization. In
addition, we observe that BitTorrent's modified choking
algorithm in seed state provides uniform service to all
peers, and that an underprovisioned initial seed leads
to absence of peer clustering and less effective
sharing incentives. Based on our results, we provide
guidelines for seed provisioning by content providers,
and discuss a tracker protocol extension that addresses
an identified limitation of the protocol.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; choking algorithm; clustering; incentives;
seed provisioning",
}
@Article{Sanghavi:2007:DLS,
author = "Sujay Sanghavi and Loc Bui and R. Srikant",
title = "Distributed link scheduling with constant overhead",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "313--324",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254920",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper proposes a new class of simple, distributed
algorithms for scheduling in wireless networks. The
algorithms generate new schedules in a distributed
manner via simple local changes to existing schedules.
The class is parameterized by integers $ k \geq 1 $. We
show that algorithm $k$ of our class achieves $ k / (k
+ 2)$ of the capacity region, for every $ k \geq
1$.\par
The algorithms have small and constant worst-case
overheads: in particular, algorithm $k$ generates a new
schedule using (a) time less than $ 4 k + 2$ round-trip
times between neighboring nodes in the network, and (b)
at most three control transmissions by any given node,
for any $k$. The control signals are explicitly
specified, and face the same interference effects as
normal data transmissions. Our class of distributed
wireless scheduling algorithms are the first ones
guaranteed to achieve any fixed fraction of the
capacity region while using small and constant
overheads that do not scale with network size. The
parameter $k$ explicitly captures the tradeoff between
control overhead and scheduler throughput performance
and provides a tuning knob protocol designers can use
to harness this trade-off in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "matchings; primary interference; scheduling; wireless
networks",
}
@Article{Rajendran:2007:TBC,
author = "Raj Kumar Rajendran and Vishal Misra and Dan
Rubenstein",
title = "Theoretical bounds on control-plane self-monitoring in
routing protocols",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "325--336",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254921",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The distributed routing protocols in use today promise
to operate correctly only if all nodes implement the
protocol faithfully. A small insignificant set of nodes
have, in the past, brought an entire network to a
standstill by reporting incorrect route information.
The damage caused by these erroneous reports, in some
instances, could have been contained since incorrect
route reports sometimes reveal themselves as
inconsistencies in the state-information of correctly
functioning nodes. By checking for such inconsistencies
and taking preventive action, such as disregarding
selected route-reports, a correctly functioning node
could have limited the damage caused by the
malfunctioning nodes.\par
Our theoretical study attempts to understand when a
correctly functioning node can, by analysing its
routing-state, detect that some node is misimplementing
route selection. We present a methodology, called
Strong-Detection that helps answer the question. We
then apply Strong-Detection to three classes of routing
protocols: distance-vector, path-vector, and
link-state. For each class, we derive low-complexity
self-monitoring algorithms that take as input the
routing state and output whether any detectable
anomalies exist. We then use these algorithms to
compare and contrast the self-monitoring power of these
different classes of protocols in relation to the
complexity of the routing-state.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distance vector; misconfiguration; rogue node; routing
protocols",
}
@Article{Yuan:2007:ORF,
author = "Xin Yuan and Wickus Nienaber and Zhenhai Duan and Rami
Melhem",
title = "Oblivious routing for fat-tree based system area
networks with uncertain traffic demands",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "337--348",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254922",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fat-tree based system area networks have been widely
adopted in high performance computing clusters. In such
systems, the routing is often deterministic and the
traffic demand is usually uncertain and changing. In
this paper, we study routing performance on fat-tree
based system area networks with deterministic routing
under the assumption that the traffic demand is
uncertain. The performance of a routing algorithm under
uncertain traffic demands is characterized by the {\em
oblivious performance\/} ratio that bounds the relative
performance of the routing algorithm and the optimal
routing algorithm for any given traffic demand. We
consider both single path routing where the traffic
between each source-destination pair follows one path,
and multi-path routing where multiple paths can be used
for the traffic between a source-destination pair. We
derive lower bounds of the oblivious performance ratio
of any single path routing scheme for fat-tree
topologies and develop single path oblivious routing
schemes that achieve the optimal oblivious performance
ratio for commonly used fat-tree topologies. These
oblivious routing schemes provide the best performance
guarantees among all single path routing algorithms
under uncertain traffic demands. For multi-path
routing, we show that it is possible to obtain a scheme
that is optimal for any traffic demand (an oblivious
performance ratio of 1) on the fat-tree topology. These
results quantitatively demonstrate that single path
routing cannot guarantee high routing performance while
multi-path routing is very effective in balancing
network loads on the fat-tree topology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fat-tree; oblivious routing; system area networks",
}
@Article{Nahum:2007:ESS,
author = "Erich M. Nahum and John Tracey and Charles P. Wright",
title = "Evaluating {SIP} server performance",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "349--350",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254924",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SIP is a protocol of growing importance, with uses for
VoIP, instant messaging, presence, and more. However,
its performance is not well-studied or understood. In
this extended abstract we overview our experimental
evaluation of common SIP server scenarios using
open-source SIP software such as OpenSER and SIPp
running on Linux.\par
We show performance varies greatly depending on the
server scenario and how the protocol is used. Depending
on the configuration, throughput can vary from
hundreds to thousands of operations per second. For
example, we observe that the choice of stateless vs.
stateful proxying, using TCP rather than UDP, or
including MD5-based authentication can each affect
performance by a factor of 2-4. We also provide kernel
and application profiles using Oprofile that help
explain and illustrate processing costs. Finally, we
provide a simple fix for transaction-stateful proxying
that improves performance by a factor of 10. Full
details can be found in our accompanying technical
report.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "experimental evaluation; performance; server; SIP",
}
@Article{Puzak:2007:PS,
author = "Thomas R. Puzak and Allan Hartstein and Viji
Srinivasan and Philip Emma and Arthur Nadas",
title = "Pipeline spectroscopy",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "351--352",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254925",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache; convex combination; cost of a miss; probability
transition matrix",
}
@Article{Cohen:2007:BSB,
author = "Edith Cohen and Haim Kaplan",
title = "Bottom-$k$ sketches: better and more efficient
estimation of aggregates",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "353--354",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254926",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A {\em Bottom-$k$ sketch\/} is a summary of a set of
items with nonnegative weights. Each such summary
allows us to compute approximate aggregates over the
set of items. Bottom-$k$ sketches are obtained by
associating with each item in a ground set an
independent random rank drawn from a probability
distribution that depends on the weight of the item.
For each subset of interest, the bottom-$k$ sketch is
the set of the $k$ minimum ranked items and their
ranks. Bottom-$k$ sketches have numerous applications.
We develop and analyze data structures and estimators
for bottom-$k$ sketches to facilitate their deployment.
We develop novel estimators and algorithms that show
that they are a superior alternative to other sketching
methods in both efficiency of obtaining the sketches
and the accuracy of the estimates derived from the
sketches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "approximate query processing; bottom-k; sampling;
sketches",
}
@Article{Gu:2007:GEM,
author = "Yu Gu and Lee Breslau and Nick G. Duffield and
Subhabrata Sen",
title = "{GRE} encapsulated multicast probing: a scalable
technique for measuring one-way loss",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "355--356",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254927",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop techniques for estimating one-way loss from
a measurement host to network routers which exploit
commonly implemented features on commercial routers and
do not require any new router capabilities. The work
addresses the problem of scalably performing one-way
loss measurements across specific network paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measurement; monitoring; multicast; one-way loss;
performance",
}
@Article{Mirkovic:2007:WSR,
author = "Jelena Mirkovic and Alefiya Hussain and Brett Willson
and Sonia Fahmy and Wei-Min Yao and Peter Reiher and
Stephen Schwab and Roshan Thomas",
title = "When is service really denied?: a user-centric {DoS}
metric",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "357--358",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254928",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Denial-of-service (DoS) research community lacks
accurate metrics to evaluate an attack's impact on
network services, its severity and the effectiveness of
a potential defense. We propose several DoS impact
metrics that measure the quality of service experienced
by end users during an attack, and compare these
measurements to application-specific thresholds. Our
metrics are ideal for testbed experimentation, since
necessary traffic parameters are extracted from packet
traces gathered during an experiment.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "denial of service; measurement; metrics",
}
@Article{Guo:2007:DIM,
author = "Lei Guo and Enhua Tan and Songqing Chen and Zhen Xiao
and Xiaodong Zhang",
title = "Does {Internet} media traffic really follow
{Zipf}-like distribution?",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "359--360",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254929",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is commonly agreed that Web traffic follows the
Zipf-like distribution, which is an analytical
foundation for improving Web access performance by
client-server based proxy caching systems on the
Internet. However, some recent studies have observed
non-Zipf-like distributions of Internet media traffic
in different content delivery systems. Due to the
variety of media delivery systems and the diversity of
media content, existing studies on media traffic are
largely workload specific, and the observed access
patterns are often different from or even conflict with
each other. For Web media systems, study [3] reports
that the access pattern of streaming media is Zipf-like
in a university campus network, while study [2] finds
that it is not Zipf-like in an enterprise media
server.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "media; stretched exponential; Zipf-like",
}
@Article{Hoflehner:2007:CCS,
author = "Gerolf F. Hoflehner and Darshan Desai and Daniel M.
Lavery and Alexandru Nicolau and Alexander V.
Veidenbaum",
title = "Comparative characterization of {SPEC CPU2000} and
{CPU2006} on {Itanium}{\reg} architecture",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "361--362",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254930",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently SPEC released the next generation of its CPU
benchmark, widely used by compiler writers and
architects for measuring processor performance. This
calls for characterization of the applications in SPEC
CPU2006 to guide the design of future microprocessors.
In addition, it necessitates assessing the change in
the characteristics of the applications from one suite
to another. Although similar studies using the retired
SPEC CPU benchmark suites have been done in the past,
to the best of our knowledge, a thorough
characterization of CPU2006 and its comparison with
CPU2000 has not been done so far. In this paper, we
present the above; specifically, we analyze IPC
(instructions per cycle), L1, L2 data cache misses and
branch prediction, especially in CPU2006.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "branch prediction; caches; performance evaluation;
SPEC CPU benchmarks",
}
@Article{Lin:2007:PRT,
author = "Bin Lin and Arindam Mallik and Peter A. Dinda and
Gokhan Memik and Robert P. Dick",
title = "Power reduction through measurement and modeling of
users and {CPUs}: summary",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "363--364",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254931",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic voltage and frequency scaling (DVFS);
process-driven voltage scaling (PDVS); user-driven
frequency scaling (UDFS)",
}
@Article{Wang:2007:GRI,
author = "Chong Wang and John W. Byers",
title = "Generating representative {ISP} topologies from
first-principles",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "365--366",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254932",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Understanding and modeling the factors that underlie
the growth and evolution of network topologies are
basic questions that impinge upon capacity planning,
forecasting, and protocol research. Early topology
generation work focused on generating network-wide
connectivity maps, either at the AS-level or the
router-level, typically with an eye towards reproducing
abstract properties of observed topologies. But
recently, advocates of an alternative
`first-principles' approach question the feasibility of
realizing representative topologies with simple
generative models that do not explicitly incorporate
real-world constraints, such as the relative costs of
router configurations, into the model. Our work
synthesizes these two lines by designing a topology
generation mechanism that incorporates first-principles
constraints. Our goal is more modest than that of
constructing an Internet-wide topology: we aim to
generate representative topologies for single ISPs.
However, our methods also go well beyond previous work,
as we annotate these topologies with representative
capacity and latency information. Taking only demand
for network services over a given region as input, we
propose a natural cost model for building and
interconnecting PoPs and formulate the resulting
optimization problem faced by an ISP. We devise
hill-climbing heuristics for this problem and
demonstrate that the solutions we obtain are
quantitatively similar to those in measured
router-level ISP topologies, with respect to both
topological properties and fault-tolerance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network design; network topology modeling;
optimization",
}
@Article{Bissias:2007:BDL,
author = "George Dean Bissias and Brian Neil Levine and Arnold
Rosenberg",
title = "Bounding damage from link destruction, with
application to the {Internet}",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "367--368",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254933",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "graph partitioning; spectral graph theory;
vulnerability",
}
@Article{Erman:2007:SSN,
author = "Jeffrey Erman and Anirban Mahanti and Martin Arlitt
and Ira Cohen and Carey Williamson",
title = "Semi-supervised network traffic classification",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "369--370",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254934",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "semi-supervised learning; traffic classification",
}
@Article{Mi:2007:EMI,
author = "Ningfang Mi and Alma Riska and Qi Zhang and Evgenia
Smirni and Erik Riedel",
title = "Efficient management of idleness in systems",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "371--372",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254935",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "foreground/background scheduling; storage systems",
}
@Article{deJager:2007:AIS,
author = "Douglas V. de Jager and Jeremy T. Bradley",
title = "Asynchronous iterative solution for state-based
performance metrics",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "373--374",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254936",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Solution of large sparse fixed-point problems, $M \bar{x}
= \bar{x}$ and $M \bar{x} + \bar{b} = \bar{x}$, may
be seen as underpinning many important
performance-analysis calculations. These calculations
include steady-state, passage-time and transient-time
calculations in discrete-time Markov chains,
continuous-time Markov chains and semi-Markov chains.
In recent years, much work has been done to extend the
application of asynchronous iterative fixed-point
solution methods to many different contexts. This work
has been motivated by the potential for faster
solution, more efficient use of the communication
channel and/or access to memory, and simplification of
task management and programming. In this paper, we
present theoretical developments which allow us to
extend the application of asynchronous iterative
solution methods to solve for the key performance
metrics mentioned above --- such that we may employ the
full breadth of Chazan and Miranker's classes of
asynchronous iterations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asynchronous iterations; dominant eigenvectors;
matrix-vector splitting; performance analysis;
Perron--Frobenius",
}
@Article{Hoste:2007:ACP,
author = "Kenneth Hoste and Lieven Eeckhout and Hendrik
Blockeel",
title = "Analyzing commercial processor performance numbers for
predicting performance of applications of interest",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "375--376",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254937",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current practice in benchmarking commercial computer
systems is to run a number of industry-standard
benchmarks and to report performance numbers. The huge
amount of machines and the large number of benchmarks
for which performance numbers are published make it
hard to observe clear performance trends though. In
addition, these performance numbers for specific
benchmarks do not provide insight into how applications
of interest that are not part of the benchmark suite
would perform on those machines.\par
In this work we build a methodology for analyzing
published commercial machine performance data sets. We
apply statistical data analysis techniques, more in
particular principal components analysis and cluster
analysis, to reduce the amount of information to a
manageable amount to facilitate its understanding.
Visualizing SPEC CPU2000 performance numbers for 26
benchmarks and 1000+ machines in just a few graphs
gives insight into how commercial machines compare
against each other.\par
In addition, we provide a way of relating inherent
program behavior to these performance numbers so that
insights can be gained into how the observed
performance trends relate to the behavioral
characteristics of computer programs. This results in a
methodology for the ubiquitous benchmarking problem of
predicting performance of an application of interest
based on its similarities with the benchmarks in a
published industry-standard benchmark suite.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "benchmark similarity; performance analysis;
performance prediction",
}
@Article{He:2007:BSS,
author = "Jiayue He and Augustin Chaintreau",
title = "{BRADO}: scalable streaming through reconfigurable
trees",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "377--378",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254938",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "application layer multicast; network overlays; TCP
tandem",
}
@Article{Nurmi:2007:QQB,
author = "Daniel Charles Nurmi and John Brevik and Rich Wolski",
title = "{QBETS}: queue bounds estimation from time series",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "379--380",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254939",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "batch scheduling; queue prediction; super-computing",
}
@Article{Deng:2007:PDS,
author = "Leiwen Deng and Aleksandar Kuzmanovic",
title = "{Pong}: diagnosing spatio-temporal {Internet}
congestion properties",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "381--382",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1254882.1254940",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ability to accurately detect congestion events in
the Internet and reveal their spatial (i.e., where they
happen?) and temporal (i.e., how frequently they occur
and how long they last?) properties would significantly
improve our understanding of how the Internet operates.
In this paper we present {\em Pong}, a novel
measurement tool capable of effectively diagnosing
congestion events over short (e.g., $ \approx $100ms or
longer) time-scales, and simultaneously locating
congested points within a single hop on an end-to-end
path at the granularity of a single link.\par {\em
Pong\/} (i) uses queuing delay as indicative of
congestion, and (ii) strategically combines end-to-end
probes with those targeted to intermediate nodes.
Moreover, it (iii) achieves high sampling frequency by
sending probes to all intermediate nodes, including
uncongested ones, (iv) dramatically improves spatial
detection granularity (i.e., from path segments to
individual links), by using short-term congestion
history, (v) considerably enhances the measurement
quality by adjusting the probing methodology (e.g.,
send 4-, 3-, or 2-packet probes) based on the observed
path topology, and (vi) deterministically detects
moments of its own inaccuracy. We conduct a large-scale
measurement study on over 23,000 Internet paths and
present their spatial-temporal properties as inferred
by {\em Pong}.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "coordinated probing; Pong",
}
@Article{Aalto:2007:MDO,
author = "Samuli Aalto and Urtzi Ayesta",
title = "Mean delay optimization for the {M/G/1} queue with
{Pareto} type service times",
journal = j-SIGMETRICS,
volume = "35",
number = "1",
pages = "383--384",
month = jun,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1269899.1254941",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:48 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Gittins index; M/G/1; mean delay; Pareto distribution;
scheduling",
}
@Article{Squillante:2007:F,
author = "Mark S. Squillante",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "2--2",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330558",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The complexity of computer systems, networks and
applications, as well as the advancements in computer
technology, continue to grow at a rapid pace.
Mathematical analysis, modeling and optimization have
been playing, and continue to play, an important role
in research studies to investigate fundamental issues
and tradeoffs at the core of performance problems in
the design and implementation of complex computer
systems, networks and applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gianini:2007:PNR,
author = "Gabriele Gianini and Ernesto Damiani",
title = "{Poisson}-noise removal in self-similarity studies
based on packet-counting: factorial-moment\slash
strip-integral approach",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "3--5",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330559",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we point out that some common methods for
estimating self-similarity parameters --- involving
packet counting for the estimate of statistical moments
--- are affected by distortion at the finest
resolutions and quantization errors and we illustrate
--- using also a small sample of the Bellcore data set
--- a technique for removing this undesirable effect,
based on factorial moments and strip integrals. Then we
extend the strip-integral approach to the approximation
of the square of the Haar wavelet coefficients, for the
estimate of the Hurst self-affinity exponent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2007:FBS,
author = "Vladimir Marbukh",
title = "Fair bandwidth sharing under flow arrivals\slash
departures: effect of retransmissions on stability and
performance",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "6--8",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330560",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A flow-level Markov model for fair bandwidth sharing
with packet retransmissions and random flow
arrivals/departures is proposed. The model accounts for
retransmissions by assuming that file transfer rates
are determined by the end-to-end goodputs rather than
the corresponding throughputs as in the conventional
model. The model predicts the network instability even
under light exogenous load. Despite instability, a
desirable metastable network state with finite number
of flows in progress may exist. The network can be
stabilized in a close neighborhood of the metastable
state with admission control at the cost of small flow
rejection probability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "admission control; arriving/departing flows; fair
bandwidth sharing; performance; retransmissions;
stability",
}
@Article{Osogami:2007:AMT,
author = "Takayuki Osogami",
title = "Accuracy of measured throughputs and mean response
times",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "9--11",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330561",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of computer systems such as Web
systems is measured to guarantee quality of service
(QoS) or to compare different configurations of the
systems [8]. We consider the problem of whether we
should measure mean response time or throughput to
better guarantee QoS or to better compare different
configurations of a Web system. Specifically, is
measured mean response time or measured throughput more
accurate, when the Web system is measured for a fixed
period of time?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2007:EHM,
author = "Varun Gupta and Jim Dai and Mor Harchol-Balter and
Bert Zwart",
title = "The effect of higher moments of job size distribution
on the performance of an {\em {M/G/s}\/} queueing
system",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "12--14",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330562",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The {\em M/G/s\/} queueing system is the oldest and
most classical example of multiserver systems. Such
multiserver systems are commonplace in a wide range of
applications, ranging from call centers to
manufacturing systems to computer systems, because they
are cost-effective and their serving capacity can be
easily scaled up or down.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hossfeld:2007:MOT,
author = "Tobias Ho{\ss}feld and Kenji Leibnitz and Marie-Ange
Remiche",
title = "Modeling of an online {TV} recording service",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "15--17",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330563",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, new services have emerged which utilize the
Internet as a delivery mechanism for multimedia
content. With the advent of broadband accesses, more
users are willing to download large volume content from
servers, such as video files of TV shows. While some
popular video services (e.g. YouTube.com) or some
broadcasting companies (e.g. ABC.com) use streaming
data with Flash technology, some media distributors
(e.g. iTunes) offer entire TV shows for download. In
this study, we investigate the performance of the
German site OnlineTVRecorder.com (OTR), which acts as
an online video cassette recorder (VCR) where users can
program their favorite shows over a web interface and
download the recorded files from a server or its
mirrors. These files are offered in different file
formats and can consist of several hundred megabytes up
to 1 GB or more depending on the length of the TV show
as well as the encoding format. OTR can, thus, be seen
as an example for a server-based content distribution
system with large data files.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2007:OTC,
author = "Peng Wang and Stephan Bohacek",
title = "An overview of tractable computation of optimal
scheduling and routing in mesh networks",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "18--20",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Capacity optimization by optimizing transmission
schedules of wireless networks has been an active area
of research for at least 20 years. The challenge is
that the space over which the optimization is performed
is exponential in the number of links in the network.
For example, in the simple SISO case where no power
control is used and only one bitrate is available, the
optimization must be performed over a space of size $
2^L $ where there are $L$ links in the network. Thus, a
brute force approach to this optimization is not
possible for even moderate size networks of more than a
few tens of links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2007:ESE,
author = "Florin Ciucu",
title = "Exponential supermartingales for evaluating end-to-end
backlog bounds",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "21--23",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A common problem arising in network performance
analysis with the stochastic network calculus is the
evaluation of ({\em min}, +) convolutions. This paper
presents a method to solve this problem by applying a
maximal inequality to a suitable constructed
supermartingale. For a network with D/M input,
end-to-end backlog bounds obtained with this method
improve existing results at low utilizations. For the
same network, it is shown that at utilizations smaller
than a certain threshold, fluid-flow models may lead to
inaccurate approximations of packetized models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2007:IPS,
author = "Varun Gupta and Karl Sigman and Mor Harchol-Balter and
Ward Whitt",
title = "Insensitivity for {PS} server farms with {JSQ}
routing",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "24--26",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Join-the-Shortest-Queue (JSQ) is a very old and
popular routing policy for server farms. Figure 1 shows
two examples of server farm architectures employing JSQ
routing. In both cases, each incoming job is
immediately dispatched, via a front-end router, to the
queue with the fewest number of jobs, designated as the
{\em shortest queue\/} (ties are broken at random). In
Figure 1(a), jobs at a queue are served in
First-Come-First-Served (FCFS) order. In Figure 1(b),
jobs within a queue are served according to
Processor-Sharing (PS), meaning that when there are $n$
jobs at a queue, they {\em share\/} the processing
capacity, each simultaneously receiving $1/n$th of the
service. We refer to Figure 1(a) as a JSQ/FCFS server
farm and to Figure 1(b) as a JSQ/PS farm. If more
detail is needed, we use the notation: M/G/K/JSQ/PS,
denoting a Poisson arrival process, i.i.d. job sizes
from a general distribution, $K$ servers, JSQ routing;
and PS scheduling at queues.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "insensitivity; JSQ; processor-sharing; shortest queue
routing; single-queue approximation",
}
@Article{Casale:2007:CMA,
author = "Giuliano Casale and Eddy Z. Zhang and Evgenia Smirni",
title = "Characterization of moments and autocorrelation in
{MAPs}",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "27--29",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markovian Arrival Processes (MAPs) [9] are a general
class of point processes which admits
hyper-exponential, Erlang, and Markov Modulated Poisson
Processes (MMPPs) as special cases. MAPs can be easily
integrated within queueing models. This makes MAPs
useful for evaluating the impact of non-Poisson
workloads in networking and for quantifying the
performance of multi-tiered e-commerce applications and
disk drives [8, 10].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Field:2007:AAN,
author = "Tony Field and Peter Harrison",
title = "Approximate analysis of a network of fluid queues",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "30--32",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fluid models have for some time been used to
approximate stochastic networks with discrete state.
These range from traditional `heavy traffic'
approximations to the recent advances in bio-chemical
system models. Here we use an approximate compositional
method to analyse a simple feedforward network of fluid
queues which comprises both probabilistic branching and
superposition. This extends our earlier work that
showed the approximation to yield excellent results for
a linear chain of fluid queues. The results are
compared with those from a simulation model of the same
system. The compositional approach is shown to yield
good approximations, deteriorating for nodes with high
load when there is correlation between their immediate
inputs. This correlation arises when a common set of
external sources feeds more than one queue, directly or
indirectly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reich:2007:TCU,
author = "Joshua Reich and Vishal Misra and Dan Rubenstein",
title = "The time-correlated update problem",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "33--35",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent advances in the fields of sensor networks and
mobile robotics have provided the means to place
monitoring/sensing equipment in an increasingly wide
variety of environments --- a significant proportion of
which can reasonably be expected to lack traditional
network connectivity characteristics [5] [8].
Challenged networks, operating under significant sets
of constraints in which disconnected paths and long
delays are normal events, have come to be known as
Delay/Disruption Tolerant Networks (DTN) [2]. Some
examples of environments in which DTN techniques may be
required include remote or vast domains such as
underground, underwater, outer-space, Arctic, and
mountainous environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kang:2007:PFS,
author = "W. N. Kang and F. P. Kelly and N. H. Lee and R. J.
Williams",
title = "Product form stationary distributions for diffusion
approximations to a flow-level model operating under a
proportional fair sharing policy",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "36--38",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a flow-level model of Internet congestion
control introduced by Massouli{\'e} and Roberts [2]. We
assume that bandwidth is shared amongst elastic
documents according to a weighted proportional fair
bandwidth sharing policy. With Poisson arrivals and
exponentially distributed document sizes, we focus on
the heavy traffic regime in which the average load
placed on each resource is approximately equal to its
capacity. In [1], under a mild local traffic condition,
we establish a diffusion approximation for the workload
process (and hence for the flow count process) in this
model. We first recall that result in this paper. We
then state results showing that when all of the weights
are equal (proportional fair sharing) the diffusion has
a product form invariant distribution with a strikingly
simple interpretation in terms of dual random
variables, one for each of the resources of the
network. This result can be extended to the case where
document sizes are distributed as finite mixtures of
exponentials, and to models that include multi-path
routing (these extensions are not described here, but
can be found in [1]).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2007:OCP,
author = "Yingdong Lu and Ana Radovanovi{\'c} and Mark S.
Squillante",
title = "Optimal capacity planning in stochastic loss
networks",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "39--41",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A large number of application areas involve resource
allocation problems in which resources of different
capabilities are used to provide service to various
classes of customers at their arrival instants,
otherwise the opportunity to serve the customer is
lost. Stochastic loss networks are often used to
capture the dynamics and uncertainty of this class of
resource allocation problems. A wide variety of
examples include applications in telephony and data
networks, distributed computing and data centers,
inventory control and manufacturing systems, and call
and contact centers. Another emerging application area
is workforce management where, e.g., an IT services
company offers a collection of service products, each
requiring a set of resources with certain capabilities.
The customer demands for such IT service products are
stochastic and the IT services company seeks to
determine its per-class resource capacity levels in
order to maximize its profits over the long run.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cherkasova:2007:CTC,
author = "Ludmila Cherkasova and Diwaker Gupta and Amin Vahdat",
title = "Comparison of the three {CPU} schedulers in {Xen}",
journal = j-SIGMETRICS,
volume = "35",
number = "2",
pages = "42--51",
month = sep,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1330555.1330556",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:52 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The primary motivation for enterprises to adopt
virtualization technologies is to create a more agile
and dynamic IT infrastructure --- with server
consolidation, high resource utilization, the ability
to quickly add and adjust capacity on demand --- while
lowering total cost of ownership and responding more
effectively to changing business conditions. However,
effective management of virtualized IT environments
introduces new and unique requirements, such as
dynamically resizing and migrating virtual machines
(VMs) in response to changing application demands. Such
capacity management methods should work in conjunction
with the underlying resource management mechanisms. In
general, resource multiplexing and scheduling among
virtual machines is poorly understood. CPU scheduling
for virtual machines, for instance, has largely been
borrowed from the process scheduling research in
operating systems. However, it is not clear whether a
straight-forward port of process schedulers to VM
schedulers would perform just as well. We use the open
source Xen virtual machine monitor to perform a
comparative evaluation of three different CPU
schedulers for virtual machines. We analyze the impact
of the choice of scheduler and its parameters on
application performance, and discuss challenges in
estimating the application resource requirements in
virtualized environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marsan:2007:F,
author = "Marco Ajmone Marsan and Prashant Shenoy",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "2--3",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328692",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance 2007, the 26th International Symposium on
Computer Performance, Modeling, Measurements, and
Evaluation was held in Cologne, Germany, on October
2--5, 2007. Like in the past, in addition to the main
technical program, a poster session was organized to
present and discuss ongoing or recent research work in
an informal setting.\par
A total of 11 posters were selected for presentation
during the conference by the Performance 2007 Technical
Program Committee. This special issue of {\em
Performance Evaluation Review\/} consists of the
extended abstracts of these posters, which cover a wide
range of topics in the area of performance evaluation,
analytical modeling and simulation of computer systems
and communication networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cesana:2007:EPC,
author = "M. Cesana and L. Campelli and F. Borgonovo",
title = "Efficiency of physical carrier sensing in wireless
access networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "4--6",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328693",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose an analytical approach for evaluating the
impact of physical carrier sensing in simple wireless
access networks. We describe the system through a
time-continuous Markov Chain, and we gather from its
solution performance measures in terms of throughput
and collision probability. We derive qualitative
dimensioning criteria for the carrier sensing itself
under different network conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cano:2007:HDE,
author = "Juan-Carlos Cano and Jos{\'e}-Manuel Cano and Eva
Gonz{\'a}lez and Carlos Calafate and Pietro Manzoni",
title = "How does energy consumption impact performance in
{Bluetooth}?",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "7--9",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328694",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we investigate the power characteristics
of the Bluetooth technology when supporting low-power
modes. We provide accurate power consumption
measurements for different Bluetooth operating modes.
Such information could be used to drive technical
decisions on battery type and design of Bluetooth-based
end systems. Finally, we examine the trade-off between
power consumption and performance for a commercial
off-the-shelf Bluetooth device. We find that the use of
the {\em sniff\/} mode could be quite compatible with
the use of multi-slot data packets. However, when the
channel conditions require selecting single slot data
packets, the {\em sniff\/} mode highly impacts
performance, and so the power/delay trade-off must be
taken into consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lukas:2007:IBL,
author = "Georg Lukas and Andr{\'e} Herms and Daniel
Mahrenholz",
title = "Interval based off-line clock synchronization for
wireless mesh networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "10--12",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328695",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless mesh networks suffer from various problems
like congestion or packet collisions. To identify and
overcome these problems an exact global view of the
communication is required. However, it is not possible
to observe the whole network from a single location.
Instead, a distributed monitoring is necessary, which
has to include clock synchronization. We present a new
interval-based algorithm for the off-line
synchronization of passively monitored network events.
It calculates the worst-case time interval for every
event on a global clock, while considering inaccuracies
caused by processing jitter and non-uniform clock
drifts. The experimental evaluation on a live mesh
network shows an accuracy of better than 130$ \mu s $
over a four-hop distance, which is below the minimum
transmission time of data packets. Thereby, our
algorithm creates a highly precise global view of the
network, which allows a detailed diagnosis of wireless
mesh networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chydzinski:2007:SFB,
author = "Andrzej Chydzinski",
title = "Solving finite-buffer queues with {Markovian}
arrivals",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "13--15",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328696",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this report we study queueing systems satisfying
the following conditions:\par
$\bullet$ finite buffer (waiting room)\par
$\bullet$ the left-skip-free queue size process at
departure epochs\par
$\bullet$ arrival process with Markovian structure",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:2007:ASM,
author = "Gianfranco Ciardo and Andrew S. Miner and Min Wan and
Andy Jinqing Yu",
title = "Approximating stationary measures of structured
continuous-time {Markov} models using matrix diagrams",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "16--18",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328697",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the stationary solution of large ergodic
continuous-time Markov chains (CTMCs) with a finite
state space $S$, i.e., the computation of $ \pi $ as
solution of $ \pi \cdot Q = 0$ subject to $ \sum_{i
\in S} \pi [i] = 1$, where $Q$ coincides with
transition rate matrix $R$ except in its diagonal
elements, $ Q[i, i] = - \sum_{j \in S} R[i,
j]$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Silveira:2007:PPL,
author = "Fernando Silveira and Edmundo {de Souza e Silva}",
title = "Predicting packet loss statistics with hidden {Markov}
models",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "19--21",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328698",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A number of applications can benefit from estimating
future loss statistics. For instance, if the end-to-end
loss characteristics of a path can be well approximated
in advance, then a media streaming application could
adapt its transmission parameters in order to deliver
data with an acceptable quality to the user. In this
work, we present a framework for adaptive prediction
using hidden Markov models (HMMs). We propose a new
class of hidden Markov models whose parameter values
can be efficiently computed as compared to general
HMMs. We also develop methods for predicting two
measures of interest from HMMs, and perform experiments
over a set of packet traces to assess the goodness of
these predictions. Finally, we apply our prediction
framework to dynamically select a forward error
correction (FEC) scheme for media streaming. Using real
Internet packet traces we evaluate the performance of
our approach by emulating a VoIP tool. The PESQ
algorithm is applied to assess the perceptual speech
quality before and after the dynamic FEC selection. Our
results show that the prediction-based approach
achieves significant quality improvements with a small
increase in the average transmission rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menth:2007:NSM,
author = "Michael Menth and Andreas Binzenh{\"o}fer and Stefan
M{\"u}hleck",
title = "A note on source models for speech traffic",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "22--24",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328699",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Speech traffic is often used in simulations to
evaluate the performance of control mechanisms in
communication networks. Therefore, trustworthy models
are required that capture the fundamental statistical
properties of typical voice sources. The G.723.1 codec
produces on/off traffic streams with fixed size
packets. The iSAC codec produces strongly periodic packet
streams with variable packet sizes. We propose new
models for the traffic output of both codecs and show
that their queuing properties are in good accordance
with those of original traffic traces, while existing
traffic models, that are frequently used in literature,
lead to significant discrepancies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bracciale:2007:OOP,
author = "Lorenzo Bracciale and Francesca {Lo Piccolo} and Dario
Luzzi and Stefano Salsano",
title = "{OPSS}: an overlay peer-to-peer streaming simulator
for large-scale networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "25--27",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328700",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present OPSS, an Overlay Peer-to-peer
Streaming Simulator designed to simulate large scale
(i.e. in the order of 100K nodes) peer-to-peer
streaming systems. OPSS is able to simulate a fair
(i.e. `TCP-like') sharing of the uplink and downlink
bandwidth among different connections, and it
guarantees extensibility by allowing the implementation
of different peer-to-peer streaming algorithms as
separate modules. Source code of OPSS is available
under the GPL license.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Engels:2007:ETS,
author = "Kai Engels and Ralf Heidger and Reinhold Kroeger and
Morris Milekovic and Jan Schaefer and Markus Schmid and
Marcus Thoss",
title = "{eMIVA}: tool support for the instrumentation of
critical distributed applications",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "28--30",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328701",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years, enterprise applications faced an ever
growing complexity of business processes as well as an
increase in the number of interacting hardware and
software components. The ability to efficiently manage
their IT infrastructure up to the application level is
therefore critical to a company's success and results
in rising importance of Service Level Management (SLM)
technologies [6, 10]. As a prerequisite for application
management, monitoring and instrumentation techniques
face growing interest. Depending on the criticality of
an application, monitoring can either be based on
statistical samples, or can require monitoring of each
request handled by the system, e.g. for validation or
verification purposes. While most enterprise
applications belong to the first category, air traffic
control scenarios are an example for the second
category. Here, even a statistically small number of
slow requests may result in dangerous situations or
fatal accidents.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dube:2007:CPQ,
author = "Parijat Dube and Corinne Touati and Laura Wynter",
title = "Capacity planning, quality of service and price wars",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "31--33",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328702",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We model the relationship between capacity, Quality of
Service (QoS) and offered prices of service providers
in a competitive e-services market. Capacity and QoS
are linked through simple queueing formulae while QoS
and price are coupled through distributions on customer
preferences. We study the sensitivity of market share
of providers to price, capacity and market size. We
revisit the notion of `price wars' that has been shown
to lead to zero profits for all providers and conclude
that our more general model does admit some form of
anomalous behavior, but which need not lead to zero
profits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Begin:2007:HLA,
author = "Thomas Begin and Alexandre Brandwajn and Bruno Baynat
and Bernd E. Wolfinger and Serge Fdida",
title = "High-level approach to modeling of observed system
behavior",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "34--36",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328703",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current computer systems and communication networks
tend to be highly complex, and they typically hide
their internal structure from their users. Thus, for
selected aspects of capacity planning, overload control
and related applications, it is useful to have a method
allowing one to find good and relatively simple
approximations for the observed system behavior. This
paper investigates one such approach where we attempt
to represent the latter by adequately selecting the
parameters of a set of queueing models. We identify a
limited number of queueing models that we use as
`Building Blocks' (BBs) in our procedure. The selected
BBs allow us to accurately approximate the measured
behavior of a range of different systems. We propose an
approach for selecting and combining suitable BB, as
well as for their calibration. Finally, we validate our
methodology and discuss the potential and the
limitations of the proposed approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Misra:2007:F,
author = "Vishal Misra",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "37--37",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328705",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sigmetrics 2007 was held from June 12--16th in San
Diego as part of the Federated Computing Research
Conference. This year a Student Workshop was introduced
in addition to the main technical program, and it was
held on June 12th and 13th. Submissions were solicited
in the form of extended abstracts and reviewed by a
program committee. This special issue of {\em
Performance Evaluation Review\/} presents the 16
abstracts finally chosen for the program. All the
authors of accepted abstracts were given a travel grant
by Sigmetrics to come and attend the whole conference.
The program started on the afternoon of June 12th with
a highly educative, informative and entertaining talk
by Simon Peyton Jones of Microsoft Research Cambridge
on `How to write a great paper'. The next day the
student authors presented their research in a poster
session that was well attended by the regular
conference attendees. Special mention must go to the
outgoing Sigmetrics Chair, Albert Greenberg, who spent
a considerable amount of time with each and every
student presenter and gave valuable feedback to them.
After the poster session in the afternoon we had a
panel on `Performance Evaluation: An Industry
Perspective'. The participants were Albert Greenberg
(Microsoft Research), Arif Merchant (HP Labs), Muthu
Muthukrishnan (Google), Shubhabrata Sen (AT\&T
Research), and Cathy Xia (IBM). The panel was
originally scheduled to run for 90 minutes, but it ran
almost twice the scheduled time with neither the
audience nor the panelists in any mood to cut short the
lively discussion.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhu:2007:LWA,
author = "Wenbin Zhu and Patrick G. Bridges and Arthur B.
Maccabe",
title = "Light-weight application monitoring and tuning with
embedded gossip",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "38--39",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328706",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "For large-scale, long-running applications, monitoring
can be expensive. While traditional trace-based
monitoring provides detailed information about an
application, it is expensive to record and gather the
traced performance data. Processing the voluminous
traced data is so demanding that information about the
monitored application is only available post-mortem.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kortebi:2007:IAS,
author = "Riadh M. Kortebi and Yvon Gourhant and Nazim
Agoulmine",
title = "Interference-aware {SINR}-based routing: algorithms
and evaluation",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "40--42",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328707",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of mitigating interference and
improving network capacity in a single-radio,
single-channel, wireless multi-hop network. An ongoing
aim of our research is to design a routing metric which
is cognizant of interference. Modelling routing with a
complete set of interference constraints is an NP-hard
problem. One major issue to be addressed is to infer
the degree of interference among different flows. To
address this issue, and based on the measurement of the
received signal strengths, we propose a 2-Hop
interference Estimation AlgoRithm (2-HEAR). With the
use of the received signal level, a node can calculate
the signal to interference plus noise ratio (SINR) of
the links to its neighbors. The calculated SINR is used
to infer the packet error rate (PER) between a node and
each of its $1^{st}$ tier interfering nodes set.
Then the residual capacity at a given node is estimated
using the calculated PERs. A cost function is used with
the aim of load-balancing between the different flows
within the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bui:2007:ORA,
author = "Loc Bui and R. Srikant and Alexander Stolyar",
title = "Optimal resource allocation for multicast flows in
multihop wireless networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "43--43",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328708",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, the network utilization maximization theory
has been extended to include resource allocation for
multi-hop wireless networks. However, the existing
theory is applicable only to unicast flows. Other than
developing appropriate notations, it is somewhat
straightforward to extend the theory to multicast flows
if one assumes that data is delivered to all the
receivers in a multicast group at the same rate. Such a
form of multicast is called single-rate multicast. On
the other hand, there are many video applications which
allow layered-transmission so that different receivers
can subscribe to different numbers of layers and
receive different qualities of the same video,
depending upon the congestion level in their respective
neighborhoods. Moreover, in wireless networks, due to
varying signal strengths at different receivers, it may
be neither desirable nor feasible to deliver data at the
same rate to all the receivers in a multicast group.
Thus, it is important to extend the optimization-based
theory to handle multi-rate multicast flows, i.e.,
multicast flows where different receivers are allowed
to receive at different rates. Such an extension is not
straightforward as in the case of single-rate
multicast, and is the main subject of this paper. We
note that the multi-rate multicast problem has been
considered in the context of wired network. However,
those approaches cannot be directly applied to wireless
networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mi:2007:PIA,
author = "Ningfang Mi",
title = "Performance impacts of autocorrelated flows in
multi-tiered systems",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "44--45",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328709",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We focus on the general problem of capacity planning
and performance prediction of multi-tiered systems.
Workload characterization studies of such systems
usually examine the stochastic characteristics of
arrivals to the system and wait/service times at
various tiers aiming at bottleneck identification,
diagnosing the conditions under which bottlenecks are
triggered, and assisting the development of resource
management policies to improve performance or provide
service level provisioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kaushik:2007:RCA,
author = "Neena Kaushik and Silvia Figueira and Stephen A.
Chiappari",
title = "Resource co-allocation using advance reservations with
flexible time-windows",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "46--48",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328710",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Co-allocations require the availability of more than
one resource for utilization in a time interval. We
show that co-allocations increase the blocking
probability and analyze the use of flexible windows to
lower blocking probability in spite of
co-allocations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "advance reservations; blocking probability;
co-allocation; flexible time-windows",
}
@Article{Verloop:2007:ERA,
author = "Maaike Verloop and Rudesindo N{\'u}{\~n}ez-Queija",
title = "Efficient resource allocation in bandwidth-sharing
networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "49--50",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328711",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Document transfer in the Internet is regulated by
distributed packet-based congestion control mechanisms,
usually relying on TCP. By dividing a document into
packets, parts of one file reside at different nodes
along the transmission path. The `instantaneous
transfer rate' of the entire document can be thought of
as being equal to the minimum transfer rate along the
entire path. Bandwidth-sharing networks as considered
by Massouli{\'e} \& Roberts [2] provide a natural
modeling framework for the dynamic flow-level
interaction among document transfers. The class $
\alpha $-fair policies for such networks, as introduced
by Mo \& Walrand [3], captures a wide range of
distributed allocation mechanisms such as TCP, the
proportional fair allocation and the max-min fair
allocation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Miretskiy:2007:TQS,
author = "D. I. Miretskiy and W. R. W. Scheinhardt and M. R. H.
Mandjes",
title = "Tandem queue with server slow-down",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "51--52",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328712",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study how rare events happen in the standard
two-node tandem Jackson queue and in a generalization,
the so-called slow-down network, see [2]. In the latter
model the service rate of the first server depends on
the number of jobs in the second queue: the first
server slows down if the amount of jobs in the second
queue is above some threshold and returns to its normal
speed when the number of jobs in the second queue is
below the threshold. This property protects the second
queue, which has a finite capacity $B$, from overflow.
In fact this type of overflow is precisely the rare
event we are interested in. More precisely, consider
the probability of overflow in the second queue before
the entire system becomes empty. The starting position
of the two queues may be any state in which at least
one job is present.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Volkovich:2007:SMW,
author = "Y. Volkovich and D. Donato and N. Litvak",
title = "Stochastic models for {Web} ranking",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "53--53",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328713",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Web search engines need to deal with hundreds and
thousands of pages which are relevant to a user's
query. Listing them in the right order is an important
and non-trivial task. Thus Google introduced {\em
PageRank\/} [1] as a popularity measure for Web pages.
Besides its primary application in search engines,
PageRank also became a major method for evaluating
importance of nodes in different informational networks
and database systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hylick:2007:HDP,
author = "Anthony Hylick and Andrew Rice and Brian Jones and
Ripduman Sohan",
title = "Hard drive power consumption uncovered",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "54--55",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328714",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Power consumption is a problem affecting all forms of
computing, from server farms to mobile devices. Hard
disks account for a significant percentage of a
machine's power consumption due to the mechanical
nature of drive operation and increasingly
sophisticated electronics. Due to this fact, there has
been much research conducted with aims at reducing the
power consumption of hard drives; examples including
adaptive spin-down policies [1] and probabilistic
management approaches [4]. However, this work has been
done without fine-grained measurements of drive power
consumption to accurately characterize trends; a
shortcoming observed by other authors [3].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulati:2007:TFE,
author = "Ajay Gulati and Peter Varman and Arif Merchant and
Mustafa Uysal",
title = "Towards fairness and efficiency in storage systems",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "56--58",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328715",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fairness and overall I/O efficiency are two opposing
forces when it comes to sharing I/O among different
applications. Although providing QoS guarantees for
applications sharing a storage server are desirable
under many scenarios, existing work has not been able
to make a convincing case for using fairness mechanisms
for disk scheduling, mainly due to their impact on
overall throughput. In this work, we plan to
investigate two major issues: (1) study the trade-off
between fairness and efficiency, and develop mechanisms
to improve the I/O efficiency of fair schedulers (2)
provide performance guarantees to applications in terms
of higher-level application metrics (such as
transactions/sec), by changing the parameters in a
fairness algorithm that affect the allocations at the
block level.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Heimlicher:2007:EEV,
author = "Simon Heimlicher and Pavan Nuggehalli and Martin May",
title = "End-to-end vs. hop-by-hop transport",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "59--60",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328716",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The transport layer has been considered an end-to-end
issue since the early days of the Internet in the 1980s
[1], when the TCP/IP protocol suite was designed to
connect networks of dedicated routers over wired links.
However, over the last quarter of a century, network
technology as well as the understanding of the Internet
has changed, and today's wireless networks differ from
the Internet in many aspects. Since wireless links are
unreliable, it is often impossible to sustain an
end-to-end connection to transmit data in wireless
network scenarios. Even if an end-to-end path exists in
the network topology for some fraction of the
communication, it is likely to break due to signal
propagation impairments, interference, or node
mobility. Under these circumstances, the operation of
an end-to-end transport protocol such as TCP may be
severely affected.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Balakrichenan:2007:SPT,
author = "Sandoche Balakrichenan and Thomas Bugnazet and Monique
Becker",
title = "A simulation platform: for testing and optimization of
{ENUM} architecture",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "61--63",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328717",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Electronic NUmber Mapping (ENUM)[1] System, a suite of
protocols developed by IETF is one of the simplest
approach which permits communicating from the telephony
to the Internet Protocol (IP) world and vice versa in a
seamless manner. Implementing ENUM is simple because it
uses the existing Domain Name System (DNS) to store and
serve the information linking PSTN telephone numbers to
network addresses and services (email address, SIP
phone number etc.). Explanation of how a telephone
number is converted to a Fully Qualified Domain Name
(FQDN) is shown in fig.1.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DNS; ENUM; HMM; model",
}
@Article{Mohror:2007:SEB,
author = "Kathryn Mohror and Karen L. Karavanic",
title = "Scalable event-based performance measurement in
high-end environments",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "64--65",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328718",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We are developing a novel performance measurement
technique to address the scalability challenges of
event-based tracing on high-end computing systems. We
collect the information needed to diagnose performance
problems that traditionally require traces, but at a
greatly reduced data volume. Performance analysts
working on today's high-end systems require event-based
measurements to correctly identify the root cause of a
number of the complex performance problems that arise
on these highly parallel systems. These
high-end architectures contain tens to hundreds of
thousands of processors, pushing application
scalability challenges to new heights. Unfortunately,
the collection of event-based data presents scalability
challenges itself: the added measurement instructions
and tool activities perturb the target application; and
the large volume of collected data increases tool
overhead, and results in data files that are difficult
to store and analyze.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vicari:2007:DRP,
author = "Claudio Vicari and Chiara Petrioli and Francesco {Lo
Presti}",
title = "Dynamic replica placement and traffic redirection in
content delivery networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "66--68",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328719",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper jointly addresses dynamic replica placement
and traffic redirection to the best replica in Content
Delivery Networks (CDNs). Our solution is fully
distributed and localized and trades off the costs paid
by the CDN provider (e.g., the number of allocated
replicas, frequency of replicas additions and removals)
with the quality of the content access service as
perceived by the final user. Our simulation
experiments show that the proposed scheme results into
a number of replicas which is only slightly higher than
the minimum required to be able to satisfy all users
requests, thus keeping the replicas at a good level of
utilization.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content access; content delivery networks; dynamic
replica placement; user requests redirection",
}
@Article{Papadopoulos:2007:PPI,
author = "Fragkiskos Papadopoulos and Konstantinos Psounis",
title = "Predicting the performance of {Internet}-like networks
using scaled-down replicas",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "69--71",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328720",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Internet is a large, heterogeneous system
operating at very high speeds and consisting of a large
number of users. Researchers use a suite of tools and
techniques in order to understand the performance of
complex networks like the Internet: measurements,
simulations, and deployments on small to medium-scale
testbeds. This work considers a novel addition to this
suite: a class of methods to {\em scale down\/} the
{\em topology\/} of the Internet that enables
researchers to create and observe a smaller replica,
and extrapolate its performance to the expected
performance of the larger Internet.\par
The key insight that we leverage is that only the
congested links along the path of each flow introduce
sizable queueing delays and dependencies among flows.
Hence, one might hope that the network properties can
be captured by a topology that consists of the
congested links only. We have verified this in [11, 12]
using extensive simulations with TCP traffic and
theoretical analysis. Further, we have also shown that
simulating a scaled topology can be up to two orders of
magnitude faster than simulating the original topology.
However, a main assumption of our approach was that
un-congested links are known in advance.\par
We are currently working on establishing rules that can
be used to efficiently identify uncongested links in
large and complex networks like the Internet, when
these are not known, and which can be ignored when
building scaled-down network replicas.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shamsi:2007:PPS,
author = "Jawwad Shamsi and Monica Brockmeyer",
title = "{PSON}: predictable service overlay networks",
journal = j-SIGMETRICS,
volume = "35",
number = "3",
pages = "72--74",
month = dec,
year = "2007",
CODEN = "????",
DOI = "https://doi.org/10.1145/1328690.1328721",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:53 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Predictable Service Overlay Networks (PSON) improve
the predictability of Internet communication by
providing an estimate of the upper bound on message
latency for each path of the overlay. The upper bound
serves as an assurance of synchrony and enables
applications to order events or make inferences based
on non-receipt of a message. For improved performance,
PSON also employs overlay routing and overlay
configuration. Messages are routed either through the
direct overlay path or via a one-hop overlay path such
that the selected path is stable and promotes
synchrony, while the overlay configuration mechanisms
are utilized in order to select nodes that promote
predictable communication. The expected impact of PSON
is that by utilizing intelligent techniques such as
upper bound estimation, routing and configuration, it
can harness the unexpected and unreliable Internet
substrate to provide a predictable communication
overlay for applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet synchrony; overlay networks; predictable
communication",
}
@Article{Gilmore:2008:F,
author = "Stephen Gilmore and Jane Hillston",
title = "Foreword",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "2--2",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364649",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The four papers in this special issue apply tools and
techniques from computer performance evaluation in the
very different domain of modelling biological systems.
This might seem to be a very odd thing to do but the
practice of analysing biological systems in this way is
becoming increasingly common. As data about the internal
components of biological systems is becoming more
readily available, biologists are increasingly asking
questions about {\em how\/} systems function. In
addition to conducting laboratory experiments, they are
supported in this exploration by {\em in silico\/}
experimentation based on models. The view taken of the
biological processes focusses on the stimuli and
responses, a view akin to that taken of engineered
systems in systems engineering. Thus this new endeavour
in biology is known as {\em Systems
Biology}.\par
Performance analysts have a long tradition of modelling
systems in order to understand and predict their
function. Their focus is particularly on the dynamic
aspects of the system, the use of, and contention for,
resources, and the impact of uncertainty or randomness.
These issues are important in the biological setting
also, and so it is perhaps inevitable that we see some
people and techniques from performance modelling being
applied in systems biology. In particular some of the
high-level modelling formalisms which have supported
Markovian performance modelling in the last few decades
(stochastic Petri nets, stochastic process algebras,
etc.) are being applied in the biological domain.
Furthermore analysis techniques, such as Markovian
analysis, Monte Carlo simulation and probabilistic
model checking have also been adopted.\par
In this volume we have sought to give a snapshot of a
variety of work which is going on at this interface
between systems biology and more traditional
quantitative analysis techniques. It is by no means an
exhaustive account of this exciting area, but rather a
taster which will hopefully whet your appetite to find
out more.\par
To open the volume, the editors provide a survey paper
describing the motivations and goals of the systems
biology endeavour, summarising the existing modelling
techniques and outlining some instances of cross-over
between performance modelling and systems biology. This
includes an account of the use of ordinary differential
equations (ODE) and stochastic simulation to analyse
biological systems, and the adoption of high-level
modelling formalisms such as Petri nets and process
algebras to drive these ODE models and
simulations.\par
In their paper Kwiatkowska, Norman and Parker show the
application of logic and probabilistic model checking
to the analysis of biological signalling pathways. They
use the PRISM probabilistic model-checker to check
formulae of the CSL logic against CTMC-based models of
the MAPK cascade, a sequence of biochemical reactions
which sends a message within a cell. The paper provides
an introduction to the CSL logic as well as the
reactive modules language implemented by the PRISM
model checker. Performance measures of interest are
described using reward structures and the analysis
achieved by PRISM is able to show how the percentage of
activated MAPK, a key component of the pathway, and the
number of MAPK-MAPKK reactions, vary as a function of
time, for different values of the initial number of
MAPKs.\par
The paper by Jeschke, Ewald, Park, Fujimoto and
Uhrmacher addresses the drive for increased physical
accuracy in simulation models which represent the
spatial aspects of cell biology. Standard approaches to
stochastic simulation of cellular systems assume that
the cell is a homogeneous soup of biochemical
components. The truth is far removed from this, as the
cell has a lot of internal structure which can have a
profound effect on the dynamics of reactions. Setting
aside the assumption that the reacting chemical species
are well-stirred, spatial approaches divide the volume
into sub-volumes and apply a structured method which
identifies the next reaction to occur in each
subvolume. The cost of such an increase in accuracy in
the simulation model is a much increased running time
so the authors use a parallel and distributed approach
to improve performance.\par
To close this special issue we have a paper by Dematt{\'e},
Priami and Romanel which uses the BlenX language and
the Beta Workbench software to analyse the MAPK pathway
considered also by Kwiatkowska, Norman and Parker. The
BlenX language, and the Beta-binders process calculus
which was its inspiration, are examples of a new
generation of languages which have been designed
specifically for the biological domain, as an
alternative to using existing languages designed for
modelling computer systems. The paper shows how a
well-designed platform for modelling and simulation can
lift the user's experience and make their use of
process calculi more valuable, delivering insights
which would not have been seen otherwise.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gilmore:2008:PEC,
author = "Stephen Gilmore and Jane Hillston",
title = "Performance evaluation comes to life: quantitative
methods applied to biological systems",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "3--13",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364650",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an introduction to the use of
quantitative methods in modelling and analysis of
biological systems. We begin with a survey of the
methods presently in widespread use in computational
biology. We then continue to consider how the modelling
techniques and tools which have been used successfully
in performance evaluation studies of hardware and
software systems are now being applied to model
functions and processes in living systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "biochemical signalling pathways; stochastic process
algebra; systems biology",
}
%%% Reviewed: @Article record, SIGMETRICS PER 35(4) pp. 14--21; fields
%%% complete and consistent with sibling entries of the same issue.
@Article{Kwiatkowska:2008:UPM,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "Using probabilistic model checking in systems
biology",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "14--21",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364651",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Probabilistic model checking is a formal verification
framework for systems which exhibit stochastic
behaviour. It has been successfully applied to a wide
range of domains, including security and communication
protocols, distributed algorithms and power management.
In this paper we demonstrate its applicability to the
analysis of biological pathways and show how it can
yield a better understanding of the dynamics of these
systems. Through a case study of the MAP
(Mitogen-Activated Protein) Kinase cascade, we explain
how biological pathways can be modelled in the
probabilistic model checker PRISM and how this enables
the analysis of a rich selection of quantitative
properties.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
%%% Reviewed: @Article record, SIGMETRICS PER 35(4) pp. 22--31; five-author
%%% list correctly separated with " and "; abstract text is verbatim from the
%%% publisher and is intentionally left untouched.
@Article{Jeschke:2008:PDD,
author = "Matthias Jeschke and Roland Ewald and Alfred Park and
Richard Fujimoto and Adelinde M. Uhrmacher",
title = "A parallel and distributed discrete event approach for
spatial cell-biological simulations",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "22--31",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364652",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As data and knowledge about cell-biological systems
increases so does the need for simulation tools to
support a hypothesis driven wet-lab experimentation.
Discrete event simulation has received a lot of
attention lately, however, often its application is
hampered by its lack of performance. One solution are
parallel, distributed approaches, however, their
application is limited by the amount of parallelism
available in the model. Recent studies have shown that
spatial aspects are crucial for cell biological
dynamics and they are also a promising candidate to
exploit parallelism. Promises and specific requirements
imposed by a spatial simulation of cell biological
systems will be illuminated by a parallel and
distributed variant of the Next-Subvolume Method (NSM),
which augments the Stochastic Simulation Algorithm
(SSA) with spatial features, and its realization in a
grid-inspired simulation system called Aurora.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
%%% Reviewed: @Article record, SIGMETRICS PER 35(4) pp. 32--39; accented
%%% surname uses the BibTeX special-character form {\'e} (correct for
%%% sorting); protected acronym {BlenX} in title is correct.
@Article{Dematte:2008:MSB,
author = "Lorenzo Dematt{\'e} and Corrado Priami and Alessandro
Romanel",
title = "Modelling and simulation of biological processes in
{BlenX}",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "32--39",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364653",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce a scalable framework built upon the BlenX
language and inspired by the Beta-binders process
calculus to model, simulate and analyse biological
systems. We show the features of the Beta Workbench
framework on a running example based on the
mitogen-activated kinase pathway. We also discuss an
incremental modelling process that allows us to scale
up from pathway to network modelling and analysis. We
finally provide a comparison with related approaches
and some hints for future extensions of the
framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computational biology; modelling and simulation;
process calculi; systems biology",
}
%%% Reviewed: @Article record, SIGMETRICS PER 35(4) pp. 40--51; protected
%%% acronym {SLA} in title; fields complete.
@Article{Sommers:2008:SPR,
author = "Joel Sommers and Paul Barford and Albert Greenberg and
Walter Willinger",
title = "An {SLA} perspective on the router buffer sizing
problem",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "40--51",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364645",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we discuss recent work on buffer sizing
in the context of an ISP's need to offer and guarantee
competitive Service Level Agreements (SLAs) to its
customers. Since SLAs specify the performance that an
ISP guarantees to its customers, they provide critical
context for many configuration and provisioning
decisions and have specific relevance to buffer sizing.
We use a controlled laboratory environment to explore
the tradeoffs between buffer size and a set of
performance metrics over a range of traffic mixtures
for three different router designs. Our empirical study
reveals performance profiles that are surprisingly
robust to differences in router architecture and
traffic mix and suggests a design space within which
buffer sizing decisions can be made in practice. We
then present a preliminary approach for making buffer
sizing decisions within this framework that relates
directly to performance and provisioning requirements
in SLAs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
%%% Reviewed: @Article record, SIGMETRICS PER 35(4) pp. 52--61; proper nouns
%%% {Diophantine} and {P2P} correctly brace-protected in the title.
@Article{Korzun:2008:DMR,
author = "Dmitry Korzun and Andrei Gurtov",
title = "A {Diophantine} model of routes in structured {P2P}
overlays",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "52--61",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364646",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "An important problem in any structured Peer-to-Peer
(P2P) overlay is what routes are available between
peers. Understanding the structure of routes helps to
solve challenging problems related to routing
performance, security, and scalability. In this paper,
we propose a theoretical approach for describing
routes. It is based on a recent result in the linear
Diophantine analysis and introduces a novel Diophantine
model of P2P routes. Such a route aggregates several
P2P paths that packets follow. A commutative
context-free grammar describes the forwarding behavior
of P2P nodes. Derivations in the grammar correspond to
P2P routes. Initial and final strings of a derivation
define packet sources and destinations, respectively.
Based on that we construct a linear Diophantine
equation system, where any solution counts forwarding
actions in a route representing certain integral
properties. Therefore, P2P paths and their composition
into routes are described by a linear Diophantine
systems; its solutions (basis) define a structure of
P2P paths.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
%%% Reviewed: @Article record, SIGMETRICS PER 35(4) pp. 62--71.
%%% FIX(review): title previously read "PCMARK{\reg}05"; normalized to
%%% "PCMark{\reg}05" to match the benchmark's capitalization as used twice
%%% in this entry's own abstract. Braces retained so BibTeX styles do not
%%% downcase the product name.
@Article{Sibai:2008:EPS,
author = "Fadi N. Sibai",
title = "Evaluating the performance of single and multiple core
processors with {PCMark{\reg}05} and benchmark
analysis",
journal = j-SIGMETRICS,
volume = "35",
number = "4",
pages = "62--71",
month = mar,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1364644.1364647",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:42:56 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "PCMark{\reg}05 [4, 8] is a highly popular synthetic
benchmark for evaluating the performance of personal
computers (PCs) with millions of downloads via the
Internet. Based on open source and commercial
applications, it measures the execution time of highly
representative code extracts of these applications and
reports scores reflecting the overall system
performance, the CPU performance, the memory subsystem
performance, the graphics subsystem performance, and
the disk subsystem performance. In this article, we
focus on the PCMark{\reg}05 CPU test suite which is
composed of 8 tests to measure the performance and
scalability of various Intel single- and dual-core
processors. Six of these tests run a single application
each. One test runs 2 multitasked applications in
parallel and another test runs 4 multitasked
applications simultaneously. We present the results of
executing this benchmark's CPU test suite on high end
Intel-based PC platforms with top of the line single
processor and dual core processors, present the results
of our profiling and hotspot analysis, shed some light
on this test suite's prominent microarchitecture events
and its active threads' distributions, and characterize
this suite's workload. These results help in
understanding the performance characteristics of this
popular benchmark and in guiding future processor
design enhancements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "performance benchmark; single and dual core
processors; workload characterization",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 1--12 (month = jun,
%%% matching the volume change from 35(4)/mar in preceding entries).
@Article{Bordenave:2008:PRM,
author = "Charles Bordenave and David McDonald and Alexandre
Proutiere",
title = "Performance of random medium access control, an
asymptotic approach",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "1--12",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375459",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random Medium-Access-Control (MAC) algorithms have
played an increasingly important role in the
development of wired and wireless Local Area Networks
(LANs) and yet the performance of even the simplest of
these algorithms, such as slotted-Aloha, are still not
clearly understood. In this paper we provide a general
and accurate method to analyze networks where
interfering users share a resource using random MAC
algorithms. We show that this method is asymptotically
exact when the number of users grows large, and explain
why it also provides extremely accurate performance
estimates even for small systems. We apply this
analysis to solve two open problems: (a) We address the
stability region of non-adaptive Aloha-like systems.
Specifically, we consider a fixed number of buffered
users receiving packets from independent exogenous
processes and accessing the resource using Aloha-like
algorithms. We provide an explicit expression to
approximate the stability region of this system, and
prove its accuracy. (b) We outline how to apply the
analysis to predict the performance of adaptive MAC
algorithms, such as the exponential back-off algorithm,
in a system where saturated users interact through
interference. In general, our analysis may be used to
quantify how far from optimality the simple MAC
algorithms used in LANs today are, and to determine if
more complicated (e.g. queue-based) algorithms proposed
in the literature could provide significant improvement
in performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "aloha/CSMA; exponential back-off; stability",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 13--24; \par in the
%%% abstract marks the publisher's paragraph breaks and is intentional.
@Article{Casale:2008:BAC,
author = "Giuliano Casale and Ningfang Mi and Evgenia Smirni",
title = "Bound analysis of closed queueing networks with
workload burstiness",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "13--24",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375460",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Burstiness and temporal dependence in service
processes are often found in multi-tier architectures
and storage devices and must be captured accurately in
capacity planning models as these features are
responsible of significant performance degradations.
However, existing models and approximations for
networks of first-come first-served (FCFS) queues with
general independent (GI) service are unable to predict
performance of systems with temporal dependence in
workloads.\par
To overcome this difficulty, we define and study a
class of closed queueing networks where service times
are represented by Markovian Arrival Processes (MAPs),
a class of point processes that can model general
distributions, but also temporal dependent features
such as burstiness in service times. We call these
models MAP queueing networks. We introduce provable
upper and lower bounds for arbitrary performance
indexes (e.g., throughput, response time, utilization)
that we call Linear Reduction (LR) bounds. Numerical
experiments indicate that LR bounds achieve a mean
accuracy error of 2 percent.\par
The result promotes LR bounds as a versatile and
reliable bounding methodology of the performance of
modern computer systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bound analysis; burstiness; closed systems; Markovian
arrival processes; nonrenewal service; queueing
networks; temporal dependence",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 25--36; math in the
%%% abstract is correctly wrapped as {$ \epsilon $} groups so BibTeX styles
%%% cannot break it.
@Article{Wierman:2008:SDI,
author = "Adam Wierman and Misja Nuyens",
title = "Scheduling despite inexact job-size information",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "25--36",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375461",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Motivated by the optimality of Shortest Remaining
Processing Time (SRPT) for mean response time, in
recent years many computer systems have used the
heuristic of `favoring small jobs' in order to
dramatically reduce user response times. However,
rarely do computer systems have knowledge of exact
remaining sizes. In this paper, we introduce the class
of $ \epsilon $-SMART policies, which formalizes the
heuristic of `favoring small jobs' in a way that
includes a wide range of policies that schedule using
inexact job-size information. Examples of $ \epsilon
$-SMART policies include (i) policies that use exact
size information, e.g., SRPT and PSJF, (ii) policies
that use job-size estimates, and (iii) policies that
use a finite number of size-based priority
levels.\par
For many $ \epsilon $-SMART policies, e.g., SRPT with
inexact job-size information, there are no analytic
results available in the literature. In this work, we
prove four main results: we derive upper and lower
bounds on the mean response time, the mean slowdown,
the response-time tail, and the conditional response
time of $ \epsilon $-SMART policies. In each case, the
results explicitly characterize the tradeoff between
the accuracy of the job-size information used to
prioritize and the performance of the resulting policy.
Thus, the results provide designers insight into how
accurate job-size information must be in order to
achieve desired performance guarantees.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "job size estimates; M/G/1; queueing; response time;
scheduling; shortest remaining processing time; SMART;
SRPT",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 37--48; {Internet}
%%% brace-protected in title; fields complete.
@Article{Lelarge:2008:NED,
author = "Marc Lelarge and Jean Bolot",
title = "Network externalities and the deployment of security
features and protocols in the {Internet}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "37--48",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375463",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Getting new security features and protocols to be
widely adopted and deployed in the Internet has been a
continuing challenge. There are several reasons for
this, in particular economic reasons arising from the
presence of network externalities. Indeed, like the
Internet itself, the technologies to secure it exhibit
network effects: their value to individual users
changes as other users decide to adopt them or not. In
particular, the benefits felt by early adopters of
security solutions might fall significantly below the
cost of adoption, making it difficult for those
solutions to gain attraction and get deployed at a
large scale.\par
Our goal in this paper is to model and quantify the
impact of such externalities on the adoptability and
deployment of security features and protocols in the
Internet. We study a network of interconnected agents,
which are subject to epidemic risks such as those
caused by propagating viruses and worms, and which can
decide whether or not to invest some amount to deploy
security solutions. Agents experience negative
externalities from other agents, as the risks faced by
an agent depend not only on the choices of that agent
(whether or not to invest in self-protection), but also
on those of the other agents. Expectations about
choices made by other agents then influence investments
in self-protection, resulting in a possibly suboptimal
outcome overall.\par
We present and solve an analytical model where the
agents are connected according to a variety of network
topologies. Borrowing ideas and techniques used in
statistical physics, we derive analytic solutions for
sparse random graphs, for which we obtain asymptotic
results. We show that we can explicitly identify the
impact of network externalities on the adoptability and
deployment of security features. In other words, we
identify both the economic and network properties that
determine the adoption of security technologies.
Therefore, we expect our results to provide useful
guidance for the design of new economic mechanisms and
for the development of network protocols likely to be
deployed at a large scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cascading; economics; epidemics; game theory; price of
anarchy; security",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 49--60.
%%% FIX(review): keywords previously contained "TDP congestion control" --
%%% an evident typo for "TCP congestion control" (the entry's title and
%%% abstract concern TCP throughout; TDP is not mentioned anywhere).
@Article{Brosh:2008:DFT,
author = "Eli Brosh and Salman Abdul Baset and Dan Rubenstein
and Henning Schulzrinne",
title = "The delay-friendliness of {TCP}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "49--60",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375464",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "TCP has traditionally been considered unfriendly for
real-time applications. Nonetheless, popular
applications such as Skype use TCP since UDP packets
cannot pass through many NATs and firewalls. Motivated
by this observation, we study the delay performance of
TCP for real-time media flows. We develop an analytical
performance model for the delay of TCP. We use
extensive experiments to validate the model and to
evaluate the impact of various TCP mechanisms on its
delay performance. Based on our results, we derive the
working region for VoIP and live video streaming
applications and provide guidelines for delay-friendly
TCP settings. Our research indicates that simple
application-level schemes, such as packet splitting and
parallel connections, can reduce the delay of real-time
TCP flows by as much as 30\% and 90\%, respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "live video streaming; performance modeling; TCP
congestion control; VoIP",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 61--72; acronym
%%% {VPN} brace-protected in title; fields complete.
@Article{Kim:2008:SVR,
author = "Changhoon Kim and Alexandre Gerber and Carsten Lund
and Dan Pei and Subhabrata Sen",
title = "Scalable {VPN} routing via relaying",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "61--72",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375465",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Enterprise customers are increasingly adopting MPLS
(Multiprotocol Label Switching) VPN (Virtual Private
Network) service that offers direct any-to-any
reachability among the customer sites via a provider
network. Unfortunately this direct reachability model
makes the service provider's routing tables grow very
large as the number of VPNs and the number of routes
per customer increase. As a result, router memory in
the provider's network has become a key bottleneck in
provisioning new customers. This paper proposes
Relaying, a scalable VPN routing architecture that the
provider can implement simply by modifying the
configuration of routers in the provider network,
without requiring changes to the router hardware and
software. Relaying substantially reduces the memory
footprint of VPNs by choosing a small number of hub
routers in each VPN that maintain full reachability
information, and by allowing non-hub routers to reach
other routers through a hub. Deploying Relaying in
practice, however, poses a challenging optimization
problem that involves minimizing router memory usage by
having as few hubs as possible, while limiting the
additional latency due to indirect delivery via a hub.
We first investigate the fundamental tension between
the two objectives and then develop algorithms to solve
the optimization problem by leveraging some unique
properties of VPNs, such as sparsity of traffic
matrices and spatial locality of customer sites.
Extensive evaluations using real traffic matrices,
routing configurations, and VPN topologies demonstrate
that Relaying is very promising and can reduce
routing-table usage by up to 90\%, while increasing the
additional distances traversed by traffic by only a few
hundred miles, and the backbone bandwidth usage by less
than 10\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measurement; optimization; routing; VPN",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 73--84.
%%% NOTE(review): DOI prefix 10.1145/1384529.x differs from the
%%% 10.1145/1375457.x prefix used by the other entries of this same issue --
%%% cannot be verified from this file alone; confirm against the ACM DL.
@Article{Tschopp:2008:HRD,
author = "Dominique Tschopp and Suhas Diggavi and Matthias
Grossglauser",
title = "Hierarchical routing over dynamic wireless networks",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "73--84",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375467",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In dynamic networks the topology evolves and routes
are maintained by frequent updates, consuming
throughput available for data transmission. We ask
whether there exist low-overhead schemes for these
networks, that produce routes that are within a small
constant factor (stretch) of the optimal route-length.
This is studied by using the underlying geometric
properties of the connectivity graph in wireless
networks. For a class of models for wireless network
that fulfill some mild conditions on the connectivity
and on mobility over the time of interest, we can
design distributed routing algorithm that maintain the
routes over a changing topology. This scheme needs only
node identities and integrates location service along
with routing, therefore accounting for the complete
overhead. We analyze the worst-case (conservative)
overhead and route-quality (stretch) performance of
this algorithm for the aforementioned class of models.
Our algorithm allows constant stretch routing with a
network wide control traffic overhead of $ O(n \log^2
n) $ bits per mobility time step (time-scale of
topology change) translating to $ O(\log^2 n) $
overhead per node (with high probability for wireless
networks with such mobility model). We can reduce the
maximum overhead per node by using a load-balancing
technique at the cost of a slightly higher average
overhead. Numerics show that these bounds are quite
conservative.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "competitive analysis; distributed routing algorithms;
geometric random graphs; wireless networks",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 85--96.
%%% NOTE(review): DOI prefix 10.1145/1384529.x differs from the
%%% 10.1145/1375457.x prefix of sibling entries in this issue; confirm
%%% against the ACM DL.
@Article{Rayanchu:2008:LAN,
author = "Shravan Rayanchu and Sayandeep Sen and Jianming Wu and
Suman Banerjee and Sudipta Sengupta",
title = "Loss-aware network coding for unicast wireless
sessions: design, implementation, and performance
evaluation",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "85--96",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375468",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Local network coding is growing in prominence as a
technique to facilitate greater capacity utilization in
multi-hop wireless networks. A specific objective of
such local network coding techniques has been to
explicitly minimize the total number of transmissions
needed to carry packets across each wireless hop. While
such a strategy is certainly useful, we argue that in
lossy wireless environments, a better use of local
network coding is to provide higher levels of
redundancy even at the cost of increasing the number of
transmissions required to communicate the same
information. In this paper we show that the design
space for effective redundancy in local network coding
is quite large, which makes optimal formulations of the
problem hard to realize in practice. We present a
detailed exploration of this design space and propose a
suite of algorithms, called CLONE, that can lead to
further throughput gains in multi-hop wireless
scenarios. Through careful analysis, simulations, and
detailed implementation on a real testbed, we show that
some of our simplest CLONE algorithms can be
efficiently implemented in today's wireless hardware to
provide a factor of two improvement in throughput for
example scenarios, while other, more effective, CLONE
algorithms require additional advances in hardware
processing speeds to be deployable in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "IEEE 802.11; network coding; wireless networks",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 97--108.
%%% NOTE(review): DOI prefix 10.1145/1384529.x differs from the
%%% 10.1145/1375457.x prefix of sibling entries in this issue; confirm
%%% against the ACM DL.
@Article{Schmid:2008:EMV,
author = "Thomas Schmid and Zainul Charbiwala and Jonathan
Friedman and Young H. Cho and Mani B. Srivastava",
title = "Exploiting manufacturing variations for compensating
environment-induced clock drift in time
synchronization",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "97--108",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375469",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Time synchronization is an essential service in
distributed computing and control systems. It is used
to enable tasks such as synchronized data sampling and
accurate time-of-flight estimation, which can be used
to locate nodes. The deviation in nodes' knowledge of
time and inter-node resynchronization rate are affected
by three sources of time stamping errors: network
wireless communication delays, platform hardware and
software delays, and environment-dependent frequency
drift characteristics of the clock source. The focus of
this work is on the last source of error, the clock
source, which becomes a bottleneck when either required
time accuracy or available energy budget and bandwidth
(and thus feasible resynchronization rate) are too
stringent. Traditionally, this has required the use of
expensive clock sources (such as temperature
compensation using precise sensors and calibration
models) that are not cost-effective in low-end wireless
sensor nodes. Since the frequency of a crystal is a
product of manufacturing and environmental parameters,
we describe an approach that exploits the subtle
manufacturing variation between a pair of inexpensive
oscillators placed in close proximity to
algorithmically compensate for the drift produced by
the environment. The algorithm effectively uses the
oscillators themselves as a sensor that can detect
changes in frequency caused by a variety of
environmental factors. We analyze the performance of
our approach using behavioral models of crystal
oscillators in our algorithm simulation. Then we apply
the algorithm to an actual temperature dataset
collected at the James Wildlife Reserve in Riverside
County, California, and test the algorithms on a
waveform generator based testbed. The result of our
experiments show that the technique can effectively
improve the frequency stability of an inexpensive
uncompensated crystal 5 times with the potential for
even higher gains in future implementations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clocks; emulation; oscillator; time synchronization",
}
%%% Reviewed: @Article record, SIGMETRICS PER 36(1) pp. 109--120; DOI back
%%% on the 10.1145/1375457.x prefix; fields complete.
@Article{Cohen:2008:CEM,
author = "Edith Cohen and Nick Duffield and Carsten Lund and
Mikkel Thorup",
title = "Confident estimation for multistage measurement
sampling and aggregation",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "109--120",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375471",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measurement, collection, and interpretation of network
usage data commonly involves multiple stage of sampling
and aggregation. Examples include sampling packets,
aggregating them into flow statistics at a router,
sampling and aggregation of usage records in a network
data repository for reporting, query and archiving.
Although unbiased estimates of packet, bytes and flows
usage can be formed for each sampling operation, for
many applications it is crucial to know the inherent
estimation error. Previous work in this area has been
limited mainly to analyzing the estimator variance for
particular methods, e.g., independent packet sampling.
However, the variance is of limited use for more
general sampling methods, where the estimate may not be
well approximated by a Gaussian distribution.\par
This motivates our paper, in which we establish
Chernoff bounds on the likelihood of estimation error
in a general multistage combination of measurement
sampling and aggregation. We derive the scale against
which errors are measured, in terms of the constituent
sampling and aggregation operations. In particular this
enables us to obtain rigorous confidence intervals
around any given estimate. We apply our method to a
number of sampling schemes both in the literature and
currently deployed, including sampling of packet
sampled NetFlow records, Sample and Hold, and Flow
Slicing. We obtain one particularly striking result in
the first case: that for a range of parameterizations,
packet sampling has no additional impact on the
estimator confidence derived from our bound, beyond
that already imposed by flow sampling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "confidence intervals; estimation; network measurement;
sampling",
}
@Article{Lu:2008:CBN,
author = "Yi Lu and Andrea Montanari and Balaji Prabhakar and
Sarang Dharmapurikar and Abdul Kabbani",
title = "Counter braids: a novel counter architecture for
per-flow measurement",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "121--132",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375472",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fine-grained network measurement requires routers and
switches to update large arrays of counters at very
high link speed (e.g. 40 Gbps). A naive algorithm needs
an infeasible amount of SRAM to store both the counters
and a flow-to-counter association rule, so that
arriving packets can update corresponding counters at
link speed. This has made accurate per-flow measurement
complex and expensive, and motivated approximate
methods that detect and measure only the large
flows.\par
This paper revisits the problem of accurate per-flow
measurement. We present a counter architecture, called
Counter Braids, inspired by sparse random graph codes.
In a nutshell, Counter Braids `compresses while
counting'. It solves the central problems (counter
space and flow-to-counter association) of per-flow
measurement by `braiding' a hierarchy of counters with
random graphs. Braiding results in drastic space
reduction by sharing counters among flows; and using
random graphs generated on-the-fly with hash functions
avoids the storage of flow-to-counter
association.\par
The Counter Braids architecture is optimal (albeit with
a complex decoder) as it achieves the maximum
compression rate asymptotically. For implementation, we
present a low-complexity message passing decoding
algorithm, which can recover flow sizes with
essentially zero error. Evaluation on Internet traces
demonstrates that almost all flow sizes are recovered
exactly with only a few bits of counter space per
flow.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "message passing algorithms; network measurement;
statistic counters",
}
@Article{Anandkumar:2008:TSB,
author = "Animashree Anandkumar and Chatschik Bisdikian and
Dakshi Agrawal",
title = "Tracking in a spaghetti bowl: monitoring transactions
using footprints",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "133--144",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375473",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The problem of tracking end-to-end service-level
transactions in the absence of instrumentation support
is considered. The transaction instances progress
through a state-transition model and generate
time-stamped footprints on entering each state in the
model. The goal is to track individual transactions
using these footprints even when the footprints may not
contain any tokens uniquely identifying the transaction
instances that generated them. Assuming a semi-Markov
process model for state transitions, the transaction
instances are tracked probabilistically by matching
them to the available footprints according to the
maximum likelihood (ML) criterion. Under the ML-rule,
for a two-state system, it is shown that the
probability that all the instances are matched
correctly is minimized when the transition times are
i.i.d. exponentially distributed. When the transition
times are i.i.d. distributed, the ML-rule reduces to a
minimum weight bipartite matching and reduces further
to a first-in first-out match for a special class of
distributions. For a multi-state model with an acyclic
state transition digraph, a constructive proof shows
that the ML-rule reduces to splicing the results of
independent matching of many bipartite systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bipartite matching; maximum-likelihood tracking;
semi-Markov process; transaction monitoring",
}
@Article{Singhal:2008:OSS,
author = "Harsh Singhal and George Michailidis",
title = "Optimal sampling in state space models with
applications to network monitoring",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "145--156",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375474",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advances in networking technology have enabled network
engineers to use sampled data from routers to estimate
network flow volumes and track them over time. However,
low sampling rates result in large noise in traffic
volume estimates. We propose to combine data on
individual flows obtained from sampling with highly
aggregate data obtained from SNMP measurements (similar
to those used in network tomography) for the tracking
problem at hand. Specifically, we introduce a
linearized state space model for the estimation of
network traffic flow volumes from combined SNMP and
sampled data. Further, we formulate the problem of
obtaining optimal sampling rates under router resource
constraints as an experiment design problem.
Theoretically it corresponds to the problem of optimal
design for estimation of conditional means for state
space models and we present the associated convex
programs for a simple approach to it. The usefulness of
the approach in the context of network monitoring is
illustrated through an extensive numerical study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "internet traffic matrix estimation; Kalman filtering;
optimal design of experiments; state space models",
}
@Article{Ioannidis:2008:DHP,
author = "Stratis Ioannidis and Peter Marbach",
title = "On the design of hybrid peer-to-peer systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "157--168",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375476",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider hybrid peer-to-peer systems
where users form an unstructured peer-to-peer network
with the purpose of assisting a server in the
distribution of data. We present a mathematical model
that we use to analyze the scalability of hybrid
peer-to-peer systems under two query propagation
mechanisms: the random walk and the expanding ring. In
particular, we characterize how the query load at the
server, the load at peers as well as the query response
time scale as the number of users in the peer-to-peer
network increases. We show that, under a properly
designed random walk propagation mechanism, hybrid
peer-to-peer systems can support an unbounded number of
users while requiring only bounded resources both at
the server and at individual peers. This important
result shows that hybrid peer-to-peer systems have
excellent scalability properties. To the best of our
knowledge, this is the first time that a theoretical
study characterizing the scalability of such hybrid
peer-to-peer systems has been presented. We illustrate
our results through numerical studies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "peer-to-peer; scalability",
}
@Article{Chen:2008:UMP,
author = "Minghua Chen and Miroslav Ponec and Sudipta Sengupta
and Jin Li and Philip A. Chou",
title = "Utility maximization in peer-to-peer systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "169--180",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375477",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study the problem of utility
maximization in P2P systems, in which aggregate
application-specific utilities are maximized by running
distributed algorithms on P2P nodes, which are
constrained by their uplink capacities. This may be
understood as extending Kelly's seminal framework from
single-path unicast over general topology to multi-path
multicast over P2P topology, with network coding
allowed. For certain classes of popular P2P topologies,
we show that routing along a linear number of trees per
source can achieve the largest rate region that can be
possibly obtained by (multi-source) network coding.
This simplification result allows us to develop a new
multi-tree routing formulation for the problem. Despite
the negative results in the literature on applying
Primal-dual algorithms to maximize utility under
multi-path settings, we have been able to develop a
Primal-dual distributed algorithm to maximize the
aggregate utility under the multi-path routing
environments. Utilizing our proposed sufficient
condition, we show global exponential convergence of
the Primal-dual algorithm to the optimal solution under
different P2P communication scenarios we study. The
algorithm can be implemented by utilizing only
end-to-end delay measurements between P2P nodes; hence,
it can be readily deployed on today's Internet. To
support this claim, we have implemented the Primal-dual
algorithm for use in a peer-assisted multi-party
conferencing system and evaluated its performance
through actual experiments on a LAN testbed and the
Internet.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "content distribution; multi-party video conferencing;
multicast; peer-to-peer; streaming; utility
maximization",
}
@Article{Simatos:2008:QSM,
author = "Florian Simatos and Philippe Robert and Fabrice
Guillemin",
title = "A queueing system for modeling a file sharing
principle",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "181--192",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375478",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We investigate in this paper the performance of a
simple file sharing principle. For this purpose, we
consider a system composed of N peers becoming active
at exponential random times; the system is initiated
with only one server offering the desired file and the
other peers after becoming active try to download it.
Once the file has been downloaded by a peer, this one
immediately becomes a server. To investigate the
transient behavior of this file sharing system, we
study the instant when the system shifts from a
congested state where all servers available are
saturated by incoming demands to a state where a
growing number of servers are idle. In spite of its
apparent simplicity, this queueing model (with a random
number of servers) turns out to be quite difficult to
analyze. A formulation in terms of an urn and ball
model is proposed and corresponding scaling results are
derived. These asymptotic results are then compared
against simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "file sharing; peer to peer; queueing systems;
transient analysis of Markov processes",
}
@Article{Goldberg:2008:PQM,
author = "Sharon Goldberg and David Xiao and Eran Tromer and
Boaz Barak and Jennifer Rexford",
title = "Path-quality monitoring in the presence of
adversaries",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "193--204",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375480",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Edge networks connected to the Internet need effective
monitoring techniques to drive routing decisions and
detect violations of Service Level Agreements (SLAs).
However, existing measurement tools, like ping,
traceroute, and trajectory sampling, are vulnerable to
attacks that can make a path look better than it really
is. In this paper, we design and analyze path-quality
monitoring protocols that reliably raise an alarm when
the packet-loss rate and delay exceed a threshold, even
when an adversary tries to bias monitoring results by
selectively delaying, dropping, modifying, injecting,
or preferentially treating packets.\par
Despite the strong threat model we consider in this
paper, our protocols are efficient enough to run at
line rate on high-speed routers. We present a secure
sketching protocol for identifying when packet loss and
delay degrade beyond a threshold. This protocol is
extremely lightweight, requiring only 250--600 bytes of
storage and periodic transmission of a comparably sized
IP packet to monitor billions of packets. We also
present secure sampling protocols that provide faster
feedback and accurate round-trip delay estimates, at
the expense of somewhat higher storage and
communication costs. We prove that all our protocols
satisfy a precise definition of secure path-quality
monitoring and derive analytic expressions for the
trade-off between statistical accuracy and system
overhead. We also compare how our protocols perform in
the client-server setting, when paths are asymmetric,
and when packet marking is not permitted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cryptography; path-quality monitoring; sampling;
sketching",
}
@Article{Pedarsani:2008:DAS,
author = "Pedram Pedarsani and Daniel R. Figueiredo and Matthias
Grossglauser",
title = "Densification arising from sampling fixed graphs",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "205--216",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375481",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "During the past decade, a number of different studies
have identified several peculiar properties of networks
that arise from a diverse universe, ranging from social
to computer networks. A recently observed feature is
known as network densification, which occurs when the
number of edges grows much faster than the number of
nodes, as the network evolves over time. This
surprising phenomenon has been empirically validated in
a variety of networks that emerge in the real world and
mathematical models have been recently proposed to
explain it. Leveraging on how real data is usually
gathered and used, we propose a new model called Edge
Sampling to explain how densification can arise. Our
model is innovative, as we consider a fixed underlying
graph and a process that discovers this graph by
probabilistically sampling its edges. We show that this
model possesses several interesting features, in
particular, that edges and nodes discovered can exhibit
densification. Moreover, when the node degree of the
fixed underlying graph follows a heavy-tailed
distribution, we show that the Edge Sampling model can
yield power law densification, establishing an
approximate relationship between the degree exponent
and the densification exponent. The theoretical
findings are supported by numerical evaluations of the
model. Finally, we apply our model to real network data
to evaluate its performance on capturing the previously
observed densification. Our results indicate that edge
sampling is indeed a plausible alternative explanation
for the densification phenomenon that has been recently
observed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "densification; edge sampling; network modeling",
}
@Article{Oliveira:2008:SEG,
author = "Ricardo V. Oliveira and Dan Pei and Walter Willinger
and Beichuan Zhang and Lixia Zhang",
title = "In search of the elusive ground truth: the
{Internet}'s {AS}-level connectivity structure",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "217--228",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375482",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Despite significant efforts to obtain an accurate
picture of the Internet's actual connectivity structure
at the level of individual autonomous systems (ASes),
much has remained unknown in terms of the quality of
the inferred AS maps that have been widely used by the
research community. In this paper we assess the quality
of the inferred Internet maps through case studies of a
set of ASes. These case studies allow us to establish
the ground truth of AS-level Internet connectivity
between the set of ASes and their directly connected
neighbors. They also enable a direct comparison between
the ground truth and inferred topology maps and yield
new insights into questions such as which parts of the
actual topology are adequately captured by the inferred
maps, and which parts are missing and why. This
information is critical in assessing for what kinds of
real-world networking problems the use of currently
inferred AS maps or proposed AS topology models are, or
are not, appropriate. More importantly, our newly
gained insights also point to new directions towards
building realistic and economically viable Internet
topology maps.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; inter-domain routing; Internet topology",
}
@Article{Bao:2008:HPI,
author = "Yungang Bao and Mingyu Chen and Yuan Ruan and Li Liu
and Jianping Fan and Qingbo Yuan and Bo Song and
Jianwei Xu",
title = "{HMTT}: a platform independent full-system memory
trace monitoring system",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "229--240",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375484",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Memory trace analysis is an important technology for
architecture research, system software (i.e., OS,
compiler) optimization, and application performance
improvements. Many approaches have been used to track
memory trace, such as simulation, binary
instrumentation and hardware snooping. However, they
usually have limitations of time, accuracy and
capacity.\par
In this paper we propose a platform independent memory
trace monitoring system, which is able to track virtual
memory reference trace of full systems (including OS,
VMMs, libraries, and applications). The system adopts a
DIMM-snooping mechanism that uses hardware boards
plugged in DIMM slots to snoop. There are several
advantages in this approach, such as fast, complete,
undistorted, and portable. Three key techniques are
proposed to address the system design challenges with
this mechanism: (1) To keep up with memory speeds, the
DDR protocol state machine is simplified, and large
FIFOs are added between the state machine and the trace
transmitting logic to handle burst memory accesses; (2)
To reconstruct physical-to-virtual mapping and
distinguish one process' address space from others, an
OS kernel module, which collects page table
information, and a synchronization mechanism, which
synchronizes the page table information with the memory
trace, are developed; (3) To dump massive trace data, we
employ a straightforward method to compress the trace
and use Gigabit Ethernet and RAID to send and receive
the compressed trace.\par
We present our implementation of an initial monitoring
system, named HMTT (Hyper Memory Trace Tracker). Using
HMTT, we have observed that burst bandwidth utilization
is much larger than average bandwidth utilization, by
up to 5X in desktop applications. We have also
confirmed that the stream memory accesses of many
applications contribute even more than 40\% of L2 Cache
misses and OS virtual memory management may decrease
stream accesses in view of memory controller (or L2
Cache), by up to 30.2\%. Moreover, we have evaluated OS
impact on memory performance in real systems. The
evaluations and case studies show the feasibility and
effectiveness of our proposed monitoring mechanism and
techniques.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DIMM; HMTT; memory trace; real system",
}
@Article{Iliadis:2008:DSV,
author = "Ilias Iliadis and Robert Haas and Xiao-Yu Hu and
Evangelos Eleftheriou",
title = "Disk scrubbing versus intra-disk redundancy for
high-reliability {RAID} storage systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "241--252",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375485",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Two schemes proposed to cope with unrecoverable or
latent media errors and enhance the reliability of RAID
systems are examined. The first scheme is the
established, widely used disk scrubbing scheme, which
operates by periodically accessing disk drives to
detect media-related unrecoverable errors. These errors
are subsequently corrected by rebuilding the sectors
affected. The second scheme is the recently proposed
intradisk redundancy scheme which uses a further level
of redundancy inside each disk, in addition to the RAID
redundancy across multiple disks. Analytic results are
obtained assuming Poisson arrivals of random I/O
requests. Our results demonstrate that the reliability
improvement due to disk scrubbing depends on the
scrubbing frequency and the workload of the system, and
may not reach the reliability level achieved by a
simple IPC-based intra-disk redundancy scheme, which is
insensitive to the workload. In fact, the IPC-based
intra-disk redundancy scheme achieves essentially the
same reliability as that of a system operating without
unrecoverable sector errors. For heavy workloads, the
reliability achieved by the scrubbing scheme can be
orders of magnitude less than that of the intra-disk
redundancy scheme.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "MTTDL; RAID; reliability analysis; stochastic
modeling; unrecoverable or latent sector errors",
}
@Article{Thereska:2008:IRP,
author = "Eno Thereska and Gregory R. Ganger",
title = "{IRONModel}: robust performance models in the wild",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "253--264",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375486",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traditional performance models are too brittle to be
relied on for continuous capacity planning and
performance debugging in many computer systems. Simply
put, a brittle model is often inaccurate and incorrect.
We find two types of reasons why a model's prediction
might diverge from the reality: (1) the underlying
system might be misconfigured or buggy or (2) the
model's assumptions might be incorrect. The extra
effort of manually finding and fixing the source of
these discrepancies, continuously, in both the system
and model, is one reason why many system designers and
administrators avoid using mathematical models
altogether. Instead, they opt for simple, but often
inaccurate, `rules-of-thumb'.\par
This paper describes IRONModel, a robust performance
modeling architecture. Through studying performance
anomalies encountered in an experimental cluster-based
storage system, we analyze why and how models and
actual system implementations get out-of-sync. Lessons
learned from that study are incorporated into
IRONModel. IRONModel leverages the redundancy of
high-level system specifications described through
models and low-level system implementation to localize
many types of system-model inconsistencies. IRONModel
can guide designers to the potential source of the
discrepancy, and, if appropriate, can
semi-automatically evolve the models to handle
unanticipated inputs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "active probing; behavioral modeling; management;
what-if",
}
@Article{Liu:2008:XFS,
author = "Alex X. Liu and Fei Chen and JeeHyun Hwang and Tao
Xie",
title = "{XEngine}: a fast and scalable {XACML} policy
evaluation engine",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "265--276",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375488",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "XACML has become the de facto standard for specifying
access control policies for various applications,
especially web services. With the explosive growth of
web applications deployed on the Internet, XACML
policies grow rapidly in size and complexity, which
leads to longer request processing time. This paper
concerns the performance of request processing, which
is a critical issue and so far has been overlooked by
the research community. In this paper, we propose
XEngine, a scheme for efficient XACML policy
evaluation. XEngine first converts a textual XACML
policy to a numerical policy. Second, it converts a
numerical policy with complex structures to a numerical
policy with a normalized structure. Third, it converts
the normalized numerical policy to tree data structures
for efficient processing of requests. To evaluate the
performance of XEngine, we conducted extensive
experiments on both real-life and synthetic XACML
policies. The experimental results show that XEngine is
orders of magnitude more efficient than Sun PDP, and
the performance difference between XEngine and Sun PDP
grows almost linearly with the number of rules in XACML
policies. For XACML policies of small sizes (with
hundreds of rules), XEngine is one to two orders of
magnitude faster than the widely deployed Sun PDP. For
XACML policies of large sizes (with thousands of
rules), XEngine is three to four orders of magnitude
faster than Sun PDP.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "access control; policy decision point (PDP); policy
enforcement point (PEP); policy evaluation; web server;
XACML",
}
@Article{Traeger:2008:DDA,
author = "Avishay Traeger and Ivan Deras and Erez Zadok",
title = "{DARC}: dynamic analysis of root causes of latency
distributions",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "277--288",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375489",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "OSprof is a versatile, portable, and efficient
profiling methodology based on the analysis of latency
distributions. Although OSprof offers several
unique benefits and has been used to uncover several
interesting performance problems, the latency
distributions that it provides must be analyzed
manually. These latency distributions are presented as
histograms and contain distinct groups of data, called
peaks, that characterize the overall behavior of the
running code. By automating the analysis process, we
make it easier to take advantage of OSprof's unique
features.\par
We have developed the Dynamic Analysis of Root Causes
system (DARC), which finds root cause paths in a
running program's call-graph using runtime latency
analysis. A root cause path is a call-path that starts
at a given function and includes the largest latency
contributors to a given peak. These paths are the main
causes for the high-level behavior that is represented
as a peak in an OSprof histogram. DARC performs PID and
call-path filtering to reduce overheads and
perturbations, and can handle recursive and indirect
calls. DARC can analyze preemptive behavior and
asynchronous call-paths, and can also resume its
analysis from a previous state, which is useful when
analyzing short-running programs or specific phases of
a program's execution.\par
We present DARC and show its usefulness by analyzing
behaviors that were observed in several interesting
scenarios. We also show that DARC has negligible
elapsed time overheads for normal use cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic instrumentation; root cause",
}
@Article{Chaitanya:2008:QQM,
author = "Shiva Chaitanya and Bhuvan Urgaonkar and Anand
Sivasubramaniam",
title = "{QDSL}: a queuing model for systems with differential
service levels",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "289--300",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375490",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A feature exhibited by many modern computing systems
is their ability to improve the quality of output they
generate for a given input by spending more computing
resources on processing it. Often this improvement
comes at the price of degraded performance in the form
of reduced throughput or increased response time. We
formulate QDSL, a class of constrained optimization
problems defined in the context of a queueing server
equipped with multiple levels of service. Solutions to
QDSL provide rules for dynamically varying the service
level to achieve desired trade-offs between output
quality and performance. Our approach involves reducing
restricted versions of such systems to Markov Decision
Processes. We find two variants of such systems worth
studying: (i) VarSL, in which a single request may be
serviced using a combination of multiple levels during
its lifetime and (ii) FixSL in which the service level
may not change during the lifetime of a request. Our
modeling indicates that optimal service level selection
policies in these systems correspond to very simple
rules that can be implemented very efficiently in
realistic, online systems. We find our policies to be
useful in two response-time-sensitive real-world
systems: (i) qSecStore, an iSCSI-based secure storage
system that has access to multiple encryption
functions, and (ii) qPowServer, a server with
DVFS-capable processor. As a representative result, in
an instance of qSecStore serving disk requests derived
from the well-regarded TPC-H traces, we are able to
improve the fraction of requests using more reliable
encryption functions by 40--60\%, while meeting
performance targets. In a simulation of qPowServer
employing realistic DVFS parameters, we are able to
improve response times significantly while only
violating specified server-wide power budgets by less
than 5W.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "differential service levels; dynamic voltage frequency
scaling; Markov decision process; secure storage",
}
@Article{Parvez:2008:ABL,
author = "Nadim Parvez and Carey Williamson and Anirban Mahanti
and Niklas Carlsson",
title = "Analysis of {BitTorrent}-like protocols for on-demand
stored media streaming",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "301--312",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375492",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper develops analytic models that characterize
the behavior of on-demand stored media content delivery
using BitTorrent-like protocols. The models capture the
effects of different piece selection policies,
including Rarest-First and two variants of In-Order.
Our models provide insight into transient and
steady-state system behavior, and help explain the
sluggishness of the system with strict In-Order
streaming. We use the models to compare different
retrieval policies across a wide range of system
parameters, including peer arrival rate,
upload/download bandwidth, and seed residence time. We
also provide quantitative results on the startup delays
and retrieval times for streaming media delivery. Our
results provide insights into the optimal design of
peer-to-peer networks for on-demand media streaming.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; on-demand streaming; peer-to-peer
systems",
}
@Article{Liu:2008:PBP,
author = "Shao Liu and Rui Zhang-Shen and Wenjie Jiang and
Jennifer Rexford and Mung Chiang",
title = "Performance bounds for peer-assisted live streaming",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "313--324",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375493",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Peer-assisted streaming is a promising way for service
providers to offer high-quality IPTV to consumers at
reasonable cost. In peer-assisted streaming, the peers
exchange video chunks with one another, and receive
additional data from the central server as needed. In
this paper, we analyze how to provision resources for
the streaming system, in terms of the server capacity,
the video quality, and the depth of the distribution
trees that deliver the content. We derive the
performance bounds for minimum server load, maximum
streaming rate, and minimum tree depth under different
peer selection constraints. Furthermore, we show that
our performance bounds are actually tight, by
presenting algorithms for constructing trees that
achieve our bounds.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "IPTV; peer-to-peer; streaming; tree construction;
video",
}
@Article{Bonald:2008:ELS,
author = "Thomas Bonald and Laurent Massouli{\'e} and Fabien
Mathieu and Diego Perino and Andrew Twigg",
title = "Epidemic live streaming: optimal performance
trade-offs",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "325--336",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375494",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several peer-to-peer systems for live streaming have
been recently deployed (e.g. CoolStreaming, PPLive,
SopCast). These all rely on distributed, epidemic-style
dissemination mechanisms. Despite their popularity, the
fundamental performance trade-offs of such mechanisms
are still poorly understood. In this paper we propose
several results that contribute to the understanding of
such trade-offs.\par
Specifically, we prove that the so-called random peer,
latest useful chunk mechanism can achieve dissemination
at an optimal rate and within an optimal delay, up to
an additive constant term. This qualitative result
suggests that epidemic live streaming algorithms can
achieve near-unbeatable rates and delays. Using
mean-field approximations, we also derive recursive
formulas for the diffusion function of two schemes
referred to as latest blind chunk, random peer and
latest blind chunk, random useful peer.\par
Finally, we provide simulation results that validate
the above theoretical results and allow us to compare
the performance of various practically interesting
diffusion schemes in terms of delay, rate, and control
overhead. In particular, we identify several peer/chunk
selection algorithms that achieve near-optimal
performance trade-offs. Moreover, we show that the
control overhead needed to implement these algorithms
may be reduced by restricting the neighborhood of each
peer without substantial performance degradation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay optimality; epidemic diffusion; p2p live
streaming",
}
@Article{Lin:2008:STM,
author = "Jiang Lin and Hongzhong Zheng and Zhichun Zhu and
Eugene Gorbatov and Howard David and Zhao Zhang",
title = "Software thermal management of {DRAM} memory for
multicore systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "337--348",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375496",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Thermal management of DRAM memory has become a
critical issue for server systems. We have done, to our
best knowledge, the first study of software thermal
management for memory subsystem on real machines. Two
recently proposed DTM (Dynamic Thermal Management)
policies have been improved and implemented in Linux OS
and evaluated on two multicore servers, a Dell
PowerEdge 1950 server and a customized Intel SR1500AL
server testbed. The experimental results first confirm
that a system-level memory DTM policy may significantly
improve system performance and power efficiency,
compared with existing memory bandwidth throttling
scheme. A policy called DTM-ACG (Adaptive Core Gating)
shows performance improvement comparable to that
reported previously. The average performance
improvements are 13.3\% and 7.2\% on the PowerEdge 1950
and the SR1500AL (vs. 16.3\% from the previous
simulation-based study), respectively. We also have
surprising findings that reveal the weakness of the
previous study: the CPU heat dissipation and its impact
on DRAM memories, which were ignored, are significant
factors. We have observed that the second policy,
called DTM-CDVFS (Coordinated Dynamic Voltage and
Frequency Scaling), has much better performance than
previously reported for this reason. The average
improvements are 10.8\% and 15.3\% on the two machines
(vs. 3.4\% from the previous study), respectively. It
also significantly reduces the processor power by
15.5\% and energy by 22.7\% on average.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "DRAM memories; thermal management",
}
@Article{Menache:2008:NPC,
author = "Ishai Menache and Nahum Shimkin",
title = "Noncooperative power control and transmission
scheduling in wireless collision channels",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "349--358",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375497",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a wireless collision channel, shared by a
finite number of mobile users who transmit to a common
base station using a random access protocol. Mobiles
are self-optimizing, and wish to minimize their
individual average power investment subject to
minimum-throughput demand. The channel state between
each mobile and the base station is stochastically
time-varying and is observed by the mobile prior to
transmission. Given the current channel state, a mobile
may decide whether to transmit or not, and to determine
the transmission power in case of transmission. In this
paper, we investigate the properties of the Nash
equilibrium of the resulting game in multiuser
networks.\par
We characterize the best-response strategy of the
mobile and show that it leads to a `water-filling'-like
power allocation. Our equilibrium analysis then reveals
that one of the possible equilibria is uniformly best
for all mobiles. Furthermore, this equilibrium can be
reached by a simple distributed mechanism that does not
require specific information on other mobiles' actions.
We then explore some additional characteristics of the
distributed power control framework. Braess-like
paradoxes are reported, where the use of multiple power
levels can diminish system capacity and also lead to
larger per-user power consumption, compared to the case
where a single level only is permitted.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel state information; non-cooperative multiple
access; power efficient Nash equilibrium; uplink
collision channel; water-filling power allocation",
}
@Article{Kandemir:2008:SDC,
author = "Mahmut Kandemir and Ozcan Ozturk",
title = "Software-directed combined {CPU}\slash link voltage
scaling for {NoC}-based {CMPs}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "359--370",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375498",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network-on-Chip (NoC) based chip multiprocessors
(CMPs) are expected to become more widespread in
future, in both high performance scientific computing
and low-end embedded computing. For many execution
environments that employ these systems, reducing power
consumption is an important goal. This paper presents a
software approach for reducing power consumption in
such systems through compiler-directed
voltage/frequency scaling. The unique characteristic of
this approach is that it scales the voltages and
frequencies of select CPUs and communication links in a
coordinated manner to maximize energy savings without
degrading performance. Our approach has three important
components. The first component is the identification
of phases in the application. The next step is to
determine the critical execution paths and slacks in
each phase. For implementing these two components, our
approach employs a novel parallel program
representation. The last component of our approach is
the assignment of voltages and frequencies to CPUs and
communication links to maximize energy savings. We use
integer linear programming (ILP) for this
voltage/frequency assignment problem. To test our
approach, we implemented it within a compilation
framework and conducted experiments with applications
from the SPEComp suite and SPECjbb. Our results show
that the proposed combined CPU/link scaling is much
more effective than scaling voltages of CPUs or
communication links in isolation. In addition, we
observed that the energy savings obtained are
consistent across a wide range of values of our major
simulation parameters such as the number of CPUs, the
number of voltage/frequency levels, and the
thread-to-CPU mapping.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CMP; communication link; compiler; cpu; NoC; voltage
scaling",
}
@Article{Crk:2008:IAE,
author = "Igor Crk and Mingsong Bi and Chris Gniady",
title = "Interaction-aware energy management for wireless
network cards",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "371--382",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375499",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless Network Interface Cards (WNICs) are part of
every portable device, where efficient energy
management plays a significant role in extending the
device's battery life. The goal of efficient energy
management is to match the performance of the WNIC to
the network activity shaped by a running application.
In the case of interactive applications on mobile
systems, network I/O is largely driven by user
interactions. Current solutions either require
application modifications or lack a sufficient context
of execution that is crucial in making accurate and
timely predictions. This paper proposes a range of
user-interaction-aware mechanisms that utilize a novel
approach of monitoring a user's interaction with
applications through the capture and classification of
mouse events. This approach yields considerable
improvements in energy savings and delay reductions of
the WNIC, while significantly improving the accuracy,
timeliness, and computational overhead of predictions
when compared to existing state-of-the-art solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "context-awareness; energy management; interaction
monitoring; prediction; resource management; wireless
network cards",
}
@Article{Stanojevi:2008:FDE,
author = "Rade Stanojevi{\'c} and Robert Shorten",
title = "Fully decentralized emulation of best-effort and
processor sharing queues",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "383--394",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375501",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Control of large distributed cloud-based services is a
challenging problem. The Distributed Rate Limiting
(DRL) paradigm was recently proposed as a mechanism for
tackling this problem. The heuristic nature of existing
DRL solutions makes their behavior unpredictable and
analytically untractable. In this paper we treat the
DRL problem in a mathematical framework and propose two
novel DRL algorithms that exhibit good and predictable
performance. The first algorithm Cloud Control with
Constant Probabilities (C3P) solves the DRL problem in
best effort environments, emulating the behavior of a
single best-effort queue in a fully distributed manner.
The second problem we approach is the DRL in processor
sharing environments. Our algorithm, Distributed
Deficit Round Robin (D2R2), parameterized by parameter
$ \alpha $, converges to a state that is, at most, $
O(1 / \alpha) $ away from the exact emulation of
centralized processor sharing queue. The convergence
and stability properties are fully analyzed for both
C3P and D2R2. Analytical results are validated
empirically through a number of representative packet
level simulations. The closed-form nature of our
results allows simple design rules which, together with
extremely low communication overhead, makes the
presented algorithms practical and easy to deploy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CDN; cloud control; consensus agreement; rate
limiting; stability and convergence",
}
@Article{Jagabathula:2008:ODS,
author = "Srikanth Jagabathula and Devavrat Shah",
title = "Optimal delay scheduling in networks with arbitrary
constraints",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "395--406",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375502",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of designing an online
scheduling scheme for a multi-hop wireless packet
network with arbitrary topology and operating under
arbitrary scheduling constraints. The objective is to
design a scheme that achieves high throughput and low
delay simultaneously. We propose a scheduling scheme
that --- for networks operating under primary
interference constraints --- guarantees a per-flow
end-to-end packet delay bound of $ 5 d_j / (1 -
\rho_j)$, at a factor 5 loss of throughput, where $
d_j$ is the path length (number of hops) of flow $j$
and $ \rho_j$ is the effective loading along the route
of flow $j$. Clearly, $ d_j$ is a universal lower bound
on end-to-end packet delay for flow $j$. Thus, our
result is essentially optimal. To the best of our
knowledge, our result is the first one to show that it
is possible to achieve a per-flow end-to-end delay
bound of $ O(\mbox{\# of hops}) $ in a constrained
network.\par
Designing such a scheme comprises two related
subproblems: Global Scheduling and Local Scheduling.
Global Scheduling involves determining the set of links
that will be simultaneously active, without violating
the scheduling constraints. While local scheduling
involves determining the packets that will be
transferred across active edges. We design a local
scheduling scheme by adapting the Preemptive
Last-In-First-Out (PL) scheme, applied for
quasi-reversible continuous time networks, to an
unconstrained discrete-time network. A global
scheduling scheme will be obtained by using stable
marriage algorithms to emulate the unconstrained
network with the constrained wireless network.\par
Our scheme can be easily extended to a network
operating under general scheduling constraints, such as
secondary interference constraints, with the same delay
bound and a loss of throughput that depends on
scheduling constraints through an intriguing `sub-graph
covering' property.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "delay; scheduling algorithm; throughput",
}
@Article{Jung:2008:RSL,
author = "Kyomin Jung and Yingdong Lu and Devavrat Shah and
Mayank Sharma and Mark S. Squillante",
title = "Revisiting stochastic loss networks: structures and
algorithms",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "407--418",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375503",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers structural and algorithmic
problems in stochastic loss networks. The very popular
Erlang approximation can be shown to provide relatively
poor performance estimates, especially for loss
networks in the critically loaded regime. This paper
proposes a novel algorithm for estimating the
stationary loss probabilities in stochastic loss
networks based on structural properties of the exact
stationary distribution, which is shown to always
converge, exponentially fast, to the asymptotically
exact results. Using a variational characterization of
the stationary distribution, an alternative proof is
provided for an important result due to Kelly, which is
simpler and may be of interest in its own right. This
paper also determines structural properties of the
inverse Erlang function characterizing the region of
capacities that ensures offered traffic is served
within a set of loss probabilities. Numerical
experiments investigate various issues of both
theoretical and practical interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Erlang loss formula and fixed-point approximation;
loss networks; multidimensional stochastic processes;
stochastic approximations",
}
@Article{Bonald:2008:TCM,
author = "Thomas Bonald and Ali Ibrahim and James Roberts",
title = "Traffic capacity of multi-cell {WLANs}",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "419--430",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375504",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance of WLANs has been extensively studied
during the past few years. While the focus has mostly
been on isolated cells, the coverage of WLANs is in
practice most often realised through several cells.
Cells using the same frequency channel typically
interact through the exclusion region enforced by the
RTS/CTS mechanism prior to the transmission of any
packet.\par
In this paper, we investigate the impact of this
interaction on the overall network capacity under
realistic dynamic traffic conditions. Specifically, we
represent each cell as a queue and derive the stability
condition of the corresponding coupled queuing system.
This condition is then used to calculate the network
capacity. To gain insight into the particular nature of
interference in multi-cell WLANs, we apply our model to
a number of simple network topologies and explicitly
derive the capacity in several cases. The results
notably show that the capacity gain obtained by using M
frequency channels can grow significantly faster than
M, the rate one might intuitively expect. In addition
to stability results, we present an approximate model
to derive the impact of network load on the mean
transfer rate seen by the users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "capacity; flow-level model; IEEE 802.11; multi-cell
WLAN; stability",
}
@Article{Reineke:2008:RCC,
author = "Jan Reineke and Daniel Grund",
title = "Relative competitiveness of cache replacement
policies",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "431--432",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375506",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cache performance; predictability; replacement policy;
WCET analysis; worst-case execution time",
}
@Article{Wen:2008:NDE,
author = "Zhihua Wen and Michael Rabinovich",
title = "Network distance estimation with dynamic landmark
triangles",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "433--434",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375507",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes an efficient and accurate
approach to estimate the network distance between
arbitrary Internet hosts. We use three landmark hosts
forming a triangle in two-dimensional space to estimate
the distance between arbitrary hosts with simple
trigonometric calculations. To improve the accuracy of
estimation, we dynamically choose the `best' triangle
for a given pair of hosts using a heuristic algorithm.
Our experiments show that this approach achieves both
lower computational and network probing cost over the
classic landmarks-based approach while producing more
accurate estimates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "network distance estimation",
}
@Article{Yuksel:2008:CSI,
author = "Murat Yuksel and Kadangode K. Ramakrishnan and
Shivkumar Kalyanaraman and Joseph D. Houle and Rita
Sadhvani",
title = "Class-of-service in {IP} backbones: informing the
network neutrality debate",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "435--436",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375508",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The benefit of Class-of-Service (CoS) is an important
topic in the `Network Neutrality' debate. Proponents of
network neutrality suggest that over-provisioning is a
viable alternative to CoS. We quantify the extra
capacity requirement for an over-provisioned classless
(i.e., best-effort) network compared to a CoS network
providing the same delay or loss performance for
premium traffic. We first develop a link model that
quantifies this Required Extra Capacity (REC). For
bursty and realistic traffic distributions, we find the
REC using ns-2 simulation comparisons of the CoS and
classless link cases. We use these link models to
quantify the REC for realistic network topologies. We
show that REC can be significant even when the
proportion of premium traffic is small, a situation
often considered benign for the over-provisioning
alternative.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "class-of-service; economics; network neutrality;
performance",
}
@Article{Dreger:2008:PRC,
author = "Holger Dreger and Anja Feldmann and Vern Paxson and
Robin Sommer",
title = "Predicting the resource consumption of network
intrusion detection systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "437--438",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When installing network intrusion detection systems
(NIDSs), operators are faced with a large number of
parameters and analysis options for tuning trade-offs
between detection accuracy versus resource
requirements. In this work we set out to assist this
process by understanding and predicting the CPU and
memory consumption of such systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "NIDS; performance model",
}
@Article{Li:2008:EMA,
author = "Bin Li and Lu Peng and Balachandran Ramadass",
title = "Efficient {MART}-aided modeling for microarchitecture
design space exploration and performance prediction",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "439--440",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer architects usually evaluate new designs by
cycle-accurate processor simulation. This approach
provides detailed insight into processor performance,
power consumption and complexity. However, only
configurations in a subspace can be simulated in
practice due to long simulation time and limited
resource, leading to suboptimal conclusions which might
not be applied in a larger design space. In this paper,
we propose an automated performance prediction approach
which employs state-of-the-art techniques from
experiment design, machine learning and data mining.
Our method not only produces highly accurate
estimations for unsampled points in the design space,
but also provides interpretation tools that help
investigators to understand performance bottlenecks.
According to our experiments, by sampling only 0.02\%
of the full design space with about 15 millions points,
the median percentage errors, based on 5000 independent
test points, range from 0.32\% to 3.12\% in 12
benchmarks. Even for the worst-case performance, the
percentage errors are within 7\% for 10 out of 12
benchmarks. In addition, the proposed model can also
help architects to find important design parameters and
performance bottlenecks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "design space exploration; MART-aided models; Multiple
Additive Regression Trees (MART); performance
prediction",
}
@Article{Balon:2008:CII,
author = "Simon Balon and Guy Leduc",
title = "Combined intra- and inter-domain traffic engineering
using hot-potato aware link weights optimization",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "441--442",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375511",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A well-known approach to intradomain traffic
engineering consists in finding the set of link weights
that minimizes a network-wide objective function for a
given intradomain traffic matrix. This approach is
inadequate because it ignores a potential impact on
interdomain routing due to hot-potato routing policies.
This may result in changes in the intradomain traffic
matrix that have not been anticipated by the link
weights optimizer, possibly leading to degraded network
performance.\par
We propose a BGP-aware link weights optimization method
that takes these hot-potato effects into account. This
method uses the interdomain traffic matrix and other
available BGP data, to extend the intradomain topology
with external virtual nodes and links, on which all the
well-tuned heuristics of a classical link weights
optimizer can be applied. Our method can also optimize
the traffic on the interdomain peering links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BGP; hot-potato routing; IGP; OSPF; traffic
engineering",
}
@Article{Anderson:2008:MDW,
author = "Eric W. Anderson and Caleb T. Phillips and Kevin S.
Bauer and Dirk C. Grunwald and Douglas C. Sicker",
title = "Modeling directionality in wireless networks: extended
abstract",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "443--444",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375512",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The physical-layer models commonly used in current
networking research only minimally address the
interaction of directional antennas and radio
propagation. This paper compares the models found in
popular simulation tools with measurements taken across
a variety of links in multiple environments. We find
that the effects of antenna direction are significantly
different from the models used by the common wireless
network simulators. We propose a parametric model which
better captures the effects of different propagation
environments on directional antenna systems. We believe
that adopting this model will allow more realistic
simulation of protocols relying on directional
antennas, supporting better design and more valid
assessment of those protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "antenna; directional; modeling; networking;
propagation; wireless",
}
@Article{Bremler-Barr:2008:LIC,
author = "Anat Bremler-Barr and David Hay and Danny Hendler and
Boris Farber",
title = "Layered interval codes for {TCAM}-based
classification",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "445--446",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375513",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "classification; TCAM",
}
@Article{Ramabhadran:2008:DRD,
author = "Sriram Ramabhadran and Joseph Pasquale",
title = "Durability of replicated distributed storage systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "447--448",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375514",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study the problem of guaranteeing data durability
[2] in distributed storage systems based on
replication. Our work is motivated by several recent
efforts [3, 5, 1] to build such systems in a
peer-to-peer environment. The key features of this
environment which make achieving durability difficult
are (1) data lifetimes may be several orders of
magnitude larger than the lifetimes of individual
storage units, and (2) the system may have little or no
control over the participation of these storage units
in the system. We use a model-based approach to develop
engineering principles for designing automated
replication and repair mechanisms to implement
durability in such systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "durability; replication",
}
@Article{Li:2008:IEM,
author = "Feihui Li and Mahmut Kandemir and Mary J. Irwin",
title = "Implementation and evaluation of a migration-based
{NUCA} design for chip multiprocessors",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "449--450",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375515",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Chip Multiprocessors (CMPs) and Non-Uniform Cache
Architectures (NUCAs) represent two emerging trends in
computer architecture. Targeting future CMP based
systems with NUCA type L2 caches, this paper proposes a
novel data migration algorithm for parallel
applications and evaluates it. The goal of this
migration scheme is to determine a suitable location
for each data block within a large L2 space at any
given point during execution. A unique characteristic
of the proposed scheme is that it models the problem of
optimal data placement in the L2 cache space as a two
dimensional post office placement problem, presents a
practical architectural implementation of this model,
and gives an evaluation of the proposed
implementation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CMP; data migration; NUCA; post office placement
problem",
}
@Article{Alouf:2008:MGQ,
author = "Sara Alouf and Eitan Altman and Amar Prakash Azad",
title = "{M/G/1} queue with repeated inhomogeneous vacations
applied to {IEEE 802.16e} power saving",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "451--452",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375516",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "constrained optimization; M/G/1 queue with repeated
inhomogeneous vacations; numerical analysis; power save
mode; system response time",
}
@Article{Seetharaman:2008:MID,
author = "Srinivasan Seetharaman and Mostafa H. Ammar",
title = "Managing inter-domain traffic in the presence of
{BitTorrent} file-sharing",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "453--454",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375517",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Overlay routing operating in a selfish manner is known
to cause undesired instability when it interacts with
native layer routing. We observe similar selfish
behavior with the BitTorrent protocol, where its
performance-awareness causes it to constantly alter the
routing decisions (peer and piece selection). This
causes fluctuations in the load experienced by the
underlying native network. By using real BitTorrent
traces and a comprehensive simulation with different
network characteristics, we show that BitTorrent
systems easily disrupt the load balance across
inter-domain links. Further, we find that existing
native layer traffic management schemes suffer from
several downsides and are not conducive to deployment.
To resolve this dilemma, we propose two BitTorrent
strategies that are effective in resolving the
cross-layer conflict.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; conflict; contention; cross-layer; traffic
engineering; traffic management",
}
@Article{Mota-Garcia:2008:COE,
author = "Edmar Mota-Garcia and Rogelio Hasimoto-Beltran",
title = "Clock offset estimation using collaborative one-way
transit time",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "455--456",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375518",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new collaborative clock offset estimation
scheme between two nodes in the Internet using
independent one-way offset estimations. Our proposal
(different than current schemes in the literature) is
intended to provide a fast and accurate clock offset
estimation in approximately [Round-Trip Time
(RTT)+40]ms. The scheme sends a group of 5 probes in
the forward and reverse paths, and models the One-way
Transit Time (OTT) by a Gamma distribution (with
parameters adapted to actual path condition) to
estimate the minimum distribution value (or long-term
minimum OTT value). End nodes exchange their
corresponding minimum distribution values to get an
improved final clock offset estimate, which takes into
account the network path asymmetries. We show that our
scheme provides a faster clock offset estimation with
lower RMSE and superior stability than NTP and current
NTP-like state of the art methodologies in the
literature.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clock offset estimation; one-way transit time",
}
@Article{Gupta:2008:SQL,
author = "Gagan R. Gupta and Ness B. Shroff",
title = "Scheduling with queue length guarantees for shared
resource systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "457--458",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375519",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We develop a class of schemes called GMWM that
guarantee optimal throughput for queuing systems with
arbitrary constraints on the set of jobs that can be
served simultaneously. We obtain an analytical upper
bound on the expected queue length. To further tighten
the upper bound, we formulate it as a convex
optimization problem. We also show that whenever the
arrival process is stabilizable, the scheme is
guaranteed to achieve an expected queue length that is
no larger than the expected queue length of any
stationary randomized policy.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Lyapunov theory; scheduling",
}
@Article{Chen:2008:ECD,
author = "Aiyou Chen and Li Li and Jin Cao",
title = "Estimating cardinality distributions in network
traffic: extended abstract",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "459--460",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375520",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Information on network host connectivity patterns are
important for network monitoring and traffic
engineering. In this paper, an efficient streaming
algorithm is proposed to estimate cardinality
distributions including connectivity distributions,
e.g. percent of hosts with any given number of distinct
communicating peers or flows.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cardinality distribution; streaming algorithm",
}
@Article{Grit:2008:WFS,
author = "Laura E. Grit and Jeffrey S. Chase",
title = "Weighted fair sharing for dynamic virtual clusters",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "461--462",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375521",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a shared server infrastructure, a scheduler
controls how quantities of resources are shared over
time in a fair manner across multiple, competing
consumers. It should support wide (parallel) requests
for variable-sized pool of resources, provide assurance
of minimum resource allotment on demand, and give
predictable assignments. Our approach integrates a fair
queuing algorithm with a calendar scheduler. We present
WINKS, a proportional share allocation policy that
addresses the needs of shared server environments. It
extends start-time fair queuing to support wide
requests with backfill, advance reservations, dynamic
cluster sizing, dynamic request sizing, and intra-flow
request prioritization. It also preserves fairness
properties across queue transformations and calendar
operations needed to implement these extensions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cluster computing; fair sharing; proportional sharing;
virtual computing; weighted fair queuing",
}
@Article{Sundaram:2008:ETF,
author = "Vasumathi Sundaram and Abhishek Chandra and Jon
Weissman",
title = "Exploring the throughput-fairness tradeoff of deadline
scheduling in heterogeneous computing environments",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "463--464",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375522",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The scalability and computing power of large-scale
computational platforms has made them attractive for
hosting compute-intensive time-critical applications.
Many of these applications are composed of
computational tasks that require specific deadlines to
be met for successful completion. In this paper, we
show that combining redundant scheduling with
deadline-based scheduling in these systems leads to a
fundamental tradeoff between throughput and fairness.
We propose a new scheduling algorithm called Limited
Resource Earliest Deadline (LRED) that couples
redundant scheduling with deadline-driven scheduling in
a flexible way by using a simple tunable parameter to
exploit this tradeoff. Our evaluation of LRED shows
that LRED provides a powerful mechanism to achieve
desired throughput or fairness under high loads and low
timeliness environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "deadline; fairness; throughput",
}
@Article{Papp:2008:CMV,
author = "Gabor Papp and Chris GauthierDickey",
title = "Characterizing multiparty voice communication for
multiplayer games",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "465--466",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1375457.1375523",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Over the last few years, the number of game players
using voice communication to talk to each other while
playing games has increased dramatically. In fact, many
modern games and game consoles have added voice support
instead of expecting third-party companies to provide
this technology. Unlike traditional voice-over-IP
technology, where most conversations are between two
people, voice communication in games often has 5 or
more people talking together as they play.\par
We present the first measurement study on the
characteristics of multiparty voice communications.
Over a 3 month period, we measured over 7,000 sessions
on an active multi-party voice communication server to
quantify the characteristics of communication generated
by game players, including overall server traffic,
group sizes, sessions characteristics, and speaking
(and silence) durations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer games; silence periods; talkspurts; voice
communication",
}
@Article{Meiners:2008:AAR,
author = "Chad R. Meiners and Alex X. Liu and Eric Torng",
title = "Algorithmic approaches to redesigning {TCAM}-based
systems",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "467--468",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375524",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; pipeline; range expansion;
TCAM",
}
@Article{Douceur:2008:PAR,
author = "John R. Douceur",
title = "Performance analysis in the real world",
journal = j-SIGMETRICS,
volume = "36",
number = "1",
pages = "469--470",
month = jun,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1384529.1375526",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Jun 27 09:43:29 MDT 2008",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "What issues are on the minds of industrial performance
analysts? Four representatives of world-class product
organizations will describe their work at the front
lines of measurement, modeling, and performance tuning.
Topics will include performance engineering of
middleware at IBM, tools for detecting false sharing in
large-scale multiprocessors at Hewlett--Packard, kernel
thread-scheduling performance in multiprocessors at
Microsoft, and low-overhead instrumentation for
profiling large-scale services at Google. Plenty of
time will be available to ask questions about how to
direct our research to have the greatest impact on
industrial practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "engineering; kernel performance; large-scale services;
middleware; performance analysis; profiling tools;
storage systems",
}
@Article{Tan:2008:IMV,
author = "Tingxi Tan and Rob Simmonds and Bradley Arlt and
Martin Arlitt and Bruce Walker",
title = "Image management in a virtualized data center",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "4--9",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453177",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Industrial research firms such as Gartner and IDC are
predicting an explosion in the number of online
services in the coming years. Virtualization
technologies could play an important role in such a
world, as they create an opportunity to provide
services in a cost-effective manner. However, to
achieve ideal savings, more dynamic environments must
be created, with Virtual Machines (VMs) being
provisioned and altered on-the-fly. Management issues
arise when using these elastic resources at scale. In
this study, we provide an initial investigation of
performance and scalability issues for image management
in a virtualized data center. Results provided show
that the choice of storage solution and access protocol
matters. For example, our tests show the time to start
a VM from a local hard drive under I/O intensive
workload increases by a factor of 15 and for certain
shared storage options, this factor increases to 30
times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "image management; performance; scalability;
virtualization",
}
@Article{Chandra:2008:CDF,
author = "Abhishek Chandra and Rohini Prinja and Sourabh Jain
and Zhi-Li Zhang",
title = "Co-designing the failure analysis and monitoring of
large-scale systems",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "10--15",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453178",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Large-scale distributed systems provide the backbone
for numerous distributed applications and online
services. These systems span over a multitude of
computing nodes located at different geographical
locations connected together via wide-area networks and
overlays. A major concern with such systems is their
susceptibility to failures leading to downtime of
services and hence high monetary/business costs. In
this paper, we argue that to understand failures in
such a system, we need to co-design monitoring system
with the failure analysis system. Unlike existing
monitoring systems which are not designed specifically
for failure analysis, we advocate a new way to design a
monitoring system with the goal of uncovering causes of
failures. Similarly the failure analysis techniques
themselves need to go beyond simple statistical
analysis of failure events in isolation to serve as an
effective tool. Towards this end, we provide a
discussion of some guiding principles for the co-design
of monitoring and failure analysis systems for
planetary scale systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:2008:ARC,
author = "Abhishek B. Sharma and Ranjita Bhagwan and Monojit
Choudhury and Leana Golubchik and Ramesh Govindan and
Geoffrey M. Voelker",
title = "Automatic request categorization in {Internet}
services",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "16--25",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453179",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modeling system performance and workload
characteristics has become essential for efficiently
provisioning Internet services and for accurately
predicting future resource requirements on anticipated
workloads. The accuracy of these models benefits
substantially by differentiating among categories of
requests based on their resource usage characteristics.
However, categorizing requests and their resource
demands often requires significantly more monitoring
infrastructure. In this paper, we describe a method to
automatically differentiate and categorize requests
without requiring sophisticated monitoring techniques.
Using machine learning, our method requires only
aggregate measures such as total number of requests and
the total CPU and network demands, and does not assume
prior knowledge of request categories or their
individual resource demands. We explore the feasibility
of our method on the .Net PetShop 4.0 benchmark
application, and show that it works well while being
lightweight, generic, and easily deployable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kansal:2008:FGE,
author = "Aman Kansal and Feng Zhao",
title = "Fine-grained energy profiling for power-aware
application design",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "26--31",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453180",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Significant opportunities for power optimization exist
at application design stage and are not yet fully
exploited by system and application designers. We
describe the challenges developers face in optimizing
software for energy efficiency by exploiting
application-level knowledge. To address these
challenges, we propose the development of automated
tools that profile the energy usage of various resource
components used by an application and guide the design
choices accordingly. We use a preliminary version of a
tool we have developed to demonstrate how automated
energy profiling helps a developer choose between
alternative designs in the energy-performance trade-off
space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fonseca:2008:LRM,
author = "Nahur Fonseca and Mark Crovella and Kav{\'e}
Salamatian",
title = "Long range mutual information",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "32--37",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453181",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network traffic modeling generally views traffic as a
superposition of flows that creates a timeseries of
volume counts (e.g. of bytes or packets). What is
omitted from this view of traffic is the contents of
packets. Packet contents (e.g. header fields) contain
considerable information that can be useful in many
applications such as change and anomaly detection, and
router performance evaluation. The goal of this paper
is to draw attention to the problem of modeling traffic
with respect to the contents of packets. In this
regard, we identify a new phenomenon: long range mutual
information (LRMI), which means that the dependence of
the contents of a pair of packets decays as a power of
the lag between them. We demonstrate that although LRMI
is hard to measure, and hard to model using the
mathematical tools at hand, its effects are easy to
identify in real traffic, and it may have a
considerable impact on a number of applications. We
believe that work in modeling this phenomenon will open
doors to new kinds of traffic models, and new advances
in a number of applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2008:HPM,
author = "Giuliano Casale and Ningfang Mi and Ludmila Cherkasova
and Evgenia Smirni",
title = "How to parameterize models with bursty workloads",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "38--44",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453182",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Although recent advances in theory indicate that
burstiness in the service time process can be handled
effectively by queueing models (e.g.,MAP queueing
networks [2]), there is a lack of understanding and of
practical results on how to perform model
parameterization, especially when this parameterization
must be derived from limited coarse
measurements.\par
We propose a new parameterization methodology based on
the index of dispersion of the service process at a
server, which is inferred by observing the number of
completions within the concatenated busy periods of
that server. The index of dispersion together with
other measurements that reflect the 'estimated' mean
and the 95th percentile of service times are used to
derive a MAP process that captures well burstiness of
the true service process.\par
Detailed experimentation on a TPC-W testbed where all
measurements are obtained via a commercially available
tool, the HP (Mercury) Diagnostics, shows that the
proposed technique offers a simple yet powerful
solution to the difficult problem of inferring accurate
descriptors of the service time process from coarse
measurements. Experimental and model prediction results
are in excellent agreement and argue strongly for the
effectiveness of the proposed methodology under bursty
or simply variable workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2008:DPF,
author = "Bill Lin and Jun (Jim) Xu",
title = "{DRAM} is plenty fast for wirespeed statistics
counting",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "45--51",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453183",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Per-flow network measurement at Internet backbone
links requires the efficient maintenance of large
arrays of statistics counters at very high speeds (e.g.
40 Gb/s). The prevailing view is that SRAM is too
expensive for implementing large counter arrays, but
DRAM is too slow for providing wirespeed updates. This
view is the main premise of a number of hybrid
SRAM/DRAM architectural proposals [2, 3, 4, 5] that
still require substantial amounts of SRAM for large
arrays. In this paper, we present a contrarian view
that modern commodity DRAM architectures, driven by
aggressive performance roadmaps for consumer
applications (e.g. video games), have advanced
architecture features that can be exploited to make
DRAM solutions practical. We describe two such schemes
that can harness the performance of these DRAM
offerings by enabling the interleaving of counter
updates to multiple memory banks. These counter schemes
are the first to support arbitrary increments and
decrements for either integer or floating point number
representations at wirespeed. We believe our
preliminary success with the use of DRAM schemes for
wirespeed statistics counting opens the possibilities
for broader research opportunities to generalize the
proposed ideas for other network measurement
functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data streaming; network management; network
measurement; statistics counter",
}
@Article{Agrawal:2008:TRF,
author = "Nitin Agrawal and Andrea C. Arpaci-Dusseau and Remzi
H. Arpaci-Dusseau",
title = "Towards realistic file-system benchmarks with
{CodeMRI}",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "52--57",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453184",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarks are crucial to understanding software
systems and assessing their performance. In file-system
research, synthetic benchmarks are accepted and widely
used as substitutes for more realistic and complex
workloads. However, synthetic benchmarks are largely
based on the benchmark writer's interpretation of the
real workload, and how it exercises the system API.
This is insufficient since even a simple operation
through the API may end up exercising the file system
in very different ways due to effects of features such
as caching and prefetching. In this paper, we describe
our first steps in creating 'realistic synthetic'
benchmarks by building a tool, CodeMRI. CodeMRI
leverages file-system domain knowledge and a small
amount of system profiling in order to better
understand how the benchmark is stressing the system
and to deconstruct its workload.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weingartner:2008:SNE,
author = "Elias Weing{\"a}rtner and Florian Schmidt and Tobias
Heer and Klaus Wehrle",
title = "Synchronized network emulation: matching prototypes
with complex simulations",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "58--63",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453185",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network emulation, in which real systems interact with
a network simulation, is a common evaluation method in
computer networking research. Until now, the simulation
in charge of representing the network has been required
to be real-time capable, as otherwise a time drift
between the simulation and the real network devices may
occur and corrupt the results. In this paper, we
present our work on synchronized network emulation. By
adding a central synchronization entity and by
virtualizing real systems for means of control, we can
build-up network emulations which contain both
unmodified x86 systems and network simulations of any
complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnamurthy:2008:WOS,
author = "Balachander Krishnamurthy and Walter Willinger",
title = "What are our standards for validation of
measurement-based networking research?",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "64--69",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453186",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Standards? What standards?",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Curry:2008:RAE,
author = "Roger Curry and Cameron Kiddle and Nayden Markatchev
and Rob Simmonds and Tingxi Tan and Martin Arlitt and
Bruce Walker",
title = "Running applications efficiently in online social
networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "71--74",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453188",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the past several years, online social networks
(OSNs) such as Facebook and MySpace have become
extremely popular with Internet users. Such sites are
popular with users because they simplify both
communication among 'communities' and access to
applications. Application developers are attracted to
these sites also, as they are able to exploit
'word-of-mouth' marketing, which these OSN sites have
embodied into their user experience. A challenge for
developers though is managing the application, as it is
difficult to predict how successful the marketing will
be. Our solution combines an OSN, Virtual Appliances,
and a utility computing environment together. We
demonstrate our solution using the Facebook portal
(OSN), the Fire Dynamics Simulator (application), and a
utility environment we built using tools such as
Condor, Moab and Xen. The application is supported
using Virtual Appliances, which interact with our
flexible infrastructure to dynamically expand and
contract based on user demand. Thus, we are able to
make much more efficient use of the underlying physical
infrastructure. We believe that our solution also has
great potential for enterprise IT environments. Initial
feedback suggests combining an OSN with our flexible
infrastructure provides a much better user experience
than the traditional, standalone use of the (legacy)
application, and simplifies the management and
increases the effective utilization of the underlying
IT resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "infrastructure; social networking; virtualization",
}
@Article{Zhang:2008:KTB,
author = "Eddy Zheng Zhang and Giuliano Casale and Evgenia
Smirni",
title = "{KPC-Toolbox}: best recipes toward automatization of
workload fitting",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "75--78",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453189",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the KPC-Toolbox, a set of MATLAB scripts
for fitting workload traces into Markovian Arrival
Processes (MAPs) in an automatic way. Given that the
MAP parameterization space can be very large, we focus
on first determining the order of the smallest MAP that
can fit the trace well using the Bayesian Information
Criterion ({\em BIC\/}). Having determined the order of
the target MAP, the KPC-Toolbox automatically derives a
MAP that captures accurately the moments and temporal
dependence of the trace. We present experiments showing
the effectiveness of the KPC-Toolbox in fitting traces
that are well-documented in the literature as very
challenging ones to fit.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DeVera:2008:AQE,
author = "Daniel {De Vera} and Pablo Rodr{\'\i}guez-Bocca and
Gerardo Rubino",
title = "Automatic quality of experience measuring on video
delivering networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "79--82",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453190",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This article describes a full video delivery network
monitoring suite. Our monitoring tool offers a new view
of a video delivery network, based on the quality as
perceived by final users (what is nowadays called
Quality of Experience, in short QoE). We measure the
perceived quality at the client side by means of the
recently proposed PSQA technology, by studying the
video flows at the frame level. The developed
monitoring suite is a completely free-software
application, based on well-known technologies such as
Simple Network Management Protocol or Round Robin
Databases, which can be executed in various operating
systems. In this short article we explain the tool
implementation and we present some of the measurements
performed with it.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "measuring; monitoring; QoE; VDN; video",
}
@Article{Rossi:2008:PS,
author = "Dario Rossi and Silvio Valenti and Paolo Veglia and
Dario Bonfiglio and Marco Mellia and Michela Meo",
title = "Pictures from the {Skype}",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "83--86",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453191",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper focuses on the characterization and
classification of Skype traffic, a nowadays very
popular and fashionable VoIP application. Building over
previous work, we develop a software tool which can be
used to examine the evolution of Skype call
classification in an interactive fashion. The
demonstrator software focuses on the main aspects of
Skype traffic characterization and presents the traffic
patterns Skype generates during a call or while idle.
In addition, the demonstrator shows the evolution of
the internal indexes the Skype classifiers
use.\par
After describing the classification process and the
demonstrator software, we use the tool to demonstrate
the feasibility of online Skype traffic identification,
considering both accuracy and computational costs.
Experimental results show that few seconds of
observation are enough to allow the classifier engines
to correctly identify the presence of Skype flows.
Moreover, results indicate that the classification
engine can cope with multi-Gbps links in real-time
using common off-the-shelf hardware.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "demonstrator; experimentation; measurement",
}
@Article{Ormont:2008:CMW,
author = "Justin Ormont and Jordan Walker and Suman Banerjee",
title = "Continuous monitoring of wide-area wireless networks:
data collection and visualization",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "87--89",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453192",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we present an infrastructure for
monitoring and visualizing performance of a wide-area
wireless network. We present a unique, vehicle-mounted
platform and a testbed for wide-area wireless
experimentation. The testbed nodes are mounted on metro
transit city buses in Madison, WI, and are currently
equipped with both cellular EV-DO and WiFi interfaces.
Our initial goal for this infrastructure is to
continuously monitor characteristics and performance of
large-scale wireless networks, e.g., city-wide mesh
networks or cellular networks. In such networks,
customers experience a very large range of geographic
and mobility-related radio environments. A
vehicle-mounted platform, with fairly deterministic
mobility patterns, can provide an efficient, low-cost,
and robust method to gather much needed performance
data on parameters like RF coverage, available
bandwidth, and impact of mobility. Our demonstration
outlines the framework of such a distributed
measurement system. We also showcase the potential
benefits by presenting our initial measurements from
this testbed through the use of intuitive visualization
interface.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anouar:2008:OOW,
author = "Hicham Anouar and Christian Bonnet and Daniel
C{\^a}mara and Fethi Filali and Raymond Knopp",
title = "An overview of {OpenAirInterface} wireless network
emulation methodology",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "90--94",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453193",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The OpenAirInterface wireless network emulator, a tool
with the dual objective of performing protocol and
application performance evaluation, in addition to
real-time layer 2/3 protocol implementation validation,
is described. The current example protocol
implementations closely resemble those of evolving
UMTS-LTE and 802.16e/m networks with the additional
possibility for creating mesh network topologies. They
do not provide any form of compliance, however, with
these standards. The emulation environment comes in
both real-time and non-real-time flavors based on
RTAI/Linux open-source developments. Novel ideas for
physical layer (PHY) abstraction are also reviewed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2008:NPN,
author = "Xiaoyue Jiang",
title = "New perspectives on network calculus",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "95--97",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453195",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Originated in communications engineering and
theoretically rooted in idempotent analysis, the theory
of network calculus (NetCal) presents an elegant
methodology for offering performance guarantees in
deterministic queuing systems. In this research we
developed two new formulations of NetCal, each of
which bears some unique insights. A fuzzy formulation
maps NetCal's (min,+) convolution operator to the
addition of fuzzy numbers. A conjugate perspective
based on the notion of Legendre transform leads to a
new NetCal formulation to be termed as CT-NetCal, which
possesses some distinct advantages in modeling,
computation and interpretation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fuzzy number addition; Legendre transform; network
calculus; product norm",
}
@Article{Garikiparthi:2008:BPA,
author = "Chaitanya Garikiparthi and Appie van de Liefvoort and
Ken Mitchell",
title = "Busy period analysis of finite {QBD} processes",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "98--100",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453196",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the number of customers served and the
length of a busy period for finite quasi birth and
death (QBD) processes where either one or both of the
arrival or service processes can be serially correlated
or interdependent. Special cases include the G/G/1/K,
M/G/1/K, and G/M/1/K queues. The resulting algorithms
are linear algebraic in nature and are easily
implemented. The solutions allow studies on how the
moments and correlations in the arrival and service
processes affect the busy period. This includes the
probability of serving exactly {\em n\/} customers
during a busy period and the moments of the length of
the busy period for different system (queue) sizes. We
present an example of a QBD process where arrival and
service processes are strongly dependent.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2008:FRS,
author = "Predrag R. Jelenkovi{\'c} and Xiaozhu Kang",
title = "Is fair resource sharing responsible for spreading
long delays?",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "101--103",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453197",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We show that mixing the statistically long jobs
(subexponential) and short ones (exponentially bounded)
using processor sharing service discipline causes long
(subexponential) delays for all types of jobs in the
system. Since processor sharing represents a baseline
fair scheduling discipline used in designing Web
servers, as well as the basic model of TCP bandwidth
sharing, our finding suggests that even though fairness
possesses many desirable attributes, it causes
unnecessarily long delays for statistically short jobs.
Hence, fairness comes with a price.\par
We further quantify the preceding result when the long
jobs follow the widely observed power law distribution
$ x^{- \alpha } $, $ \alpha > 0 $, where we discover
the criticality of the {\em lognormal\/} distribution
for the delay characteristics of the lighter jobs.
Specifically, we find that when the shorter jobs are
heavier than {\em lognormal}, the sojourn time $V$ and
the service time distribution $B$ of the shorter jobs
are tail equivalent $ P[V > x] \sim P[B > (1 - \rho)
x]$. However, if $ P[B > x]$ is lighter than {\em
lognormal}, the preceding tail equivalence does not
hold.\par
Furthermore, when the shorter jobs $B$ have much
lighter tails $ e^{- \lambda x^{\beta }}$, $ \lambda >
0$, $ \beta > 0$, we show that the distribution of the
delay $V$ for these jobs satisfy, as $ x \rightarrow
\infty $, $ - \log P[V > x] \sim c (x \log x)^{\beta /
(\beta + 1)}$, where $c$ is explicitly computable. Note
that $ \beta = 1$ and $ \beta = 2$ represent the
exponential and Gaussian cases with the corresponding
delay distributions approximately of the form $ e^{-
\sqrt {x \log x}}$ and $ e^{-(x \log x)^{2 / 3}}$,
respectively. Our results are different from the
existing ones in the literature that focused on the
delays which are of the same form (tail equivalent) as
the jobs size distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic analysis; fairness; heavy tails; induced
long delays; light tails; processor sharing queue;
scheduling",
}
@Article{Gupta:2008:FOQ,
author = "Varun Gupta",
title = "Finding the optimal quantum size: {Sensitivity}
analysis of the {\em {M\slash G\slash 1\/}} round-robin
queue",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "104--106",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453198",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the round robin (RR) scheduling policy
where the server processes each job in its buffer for
at most a fixed quantum, q, in a round-robin fashion.
The processor sharing (PS) policy is an idealization of
the quantum-based round-robin scheduling in the limit
where the quantum size becomes infinitesimal, and has
been the subject of many papers. It is well known that
the mean response time in an M/G/1/PS queue depends on
the job size distribution via only its mean. However,
almost no explicit results are available for the
round-robin policy. For example, how does the
variability of job sizes affect the mean response time
in an M/G/1/RR queue? How does one choose the optimal
quantum size in the presence of switching overheads? In
this paper we present some preliminary answers to these
fundamental questions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2008:ASI,
author = "Eitan Bachmat and Hagit Sarfati",
title = "Analysis of size interval task assignment policies",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "107--109",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453199",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze the performance of Size Interval task
assignment (SITA) scheduling policies, for multi-host
scheduling in a non-preemptive environment. We
establish a general duality theory for the performance
analysis of SITA policies. When the job size
distribution is Bounded Pareto and the range of job
sizes tends to infinity, we determine asymptotically
optimal cutoff values and provide asymptotic formulas
for average waiting time and slowdown. In the case of
inhomogeneous hosts we determine their optimal
ordering. We also consider TAGS policies. We provide a
general formula that describes their load handling
capabilities and examine their performance when the job
size distribution is Bounded Pareto.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2008:ELS,
author = "Ho-Lin Chen and Jason R. Marden and Adam Wierman",
title = "The effect of local scheduling in load balancing
designs",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "110--112",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453200",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wu:2008:JRP,
author = "Yuan Wu and Danny H. K. Tsang",
title = "Joint rate-and-power allocation for multi-channel
spectrum sharing networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "113--115",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453201",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this abstract, we propose a study on joint
rate-and-power allocation problem for multi-channel
spectrum sharing networks (SSNs). We formulate this
cross-layer optimization problem as a non-cooperative
potential game {\em G\/}$_{{\em JRPA \/ }}$ in which
each user has a coupled two-tuple strategy, i.e.,
simultaneous rate and multi-channel power allocations.
A multi-objective cost function is designed to
represent user's awareness of both QoS provisioning and
power saving. Using the game-theoretic formulation, we
investigate the properties of Nash equilibrium (N.E.)
for our {\em G\/}$_{{\em JRPA \/ }}$ model, including
its existence, and properties of QoS provisioning as
well as power saving. Furthermore, a layered structure
is derived by applying Lagrangian dual decomposition to
{\em G\/}$_{{\em JRPA \/ }}$ and a distributed
algorithm is proposed to find the N.E. via this
structure.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2008:SMB,
author = "Pei Li and John C. S. Lui and Yinlong Xu",
title = "A stochastic model for {BitTorrent}-like systems",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "116--118",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453202",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2008:CMS,
author = "Predrag R. Jelenkovi{\'c} and Xiaozhu Kang",
title = "Characterizing the miss sequence of the {LRU} cache",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "119--121",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453203",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Renewed interest in caching systems stems from their
wide-spread use for reducing the document download
latency over the Internet. Since caches are usually
organized in a hierarchical manner, it is important to
study the performance properties of tandem caches. The
first step in understanding this problem is to
characterize the miss stream from one single cache
since it represents the input to the next level cache.
In this regard, we discover that the miss stream from
one single cache is approximated well by the
superposition of a number of asymptotically independent
renewal processes. Interestingly, when this weakly
correlated miss sequence is fed into another cache,
this barely observable correlation can lead to
measurably different caching performance when compared
to the independent reference model. This result is
likely to enable the development of a rigorous analysis
of the tandem cache performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "average-case analysis; cache fault probability;
hierarchical caching; least-recently-used caching; web
caching; Zipf's law",
}
@Article{Simatos:2008:SSM,
author = "Florian Simatos and Danielle Tibi",
title = "Study of a stochastic model for mobile networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "122--124",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453204",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Momcilovic:2008:TSL,
author = "Petar Mom{\v{c}}ilovi{\'c} and Mark S. Squillante",
title = "On throughput in stochastic linear loss networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "125--127",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453205",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2008:FLR,
author = "Varun Gupta and Peter G. Harrison",
title = "Fluid level in a reservoir with an on-off source",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "128--130",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453206",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We obtain the Laplace transform of the fluid level
probability density function, in terms of the on-period
density function, for a fluid queue (or reservoir) with
on-off input at equilibrium. We further obtain explicit
expressions for the moments of fluid level in terms of
the moments of the on-period and hence derive an
algorithm for the moments of fluid level at every queue
in a tandem network. It turns out that to calculate the
$k$ th moment at the $i$ th queue, only the first $ k +
1$ moments of the on-period of the input process to the
first queue are required.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kwak:2008:SAS,
author = "K. J. Kwak and Y. M. Baryshnikov and E. G. Coffman",
title = "Self-assembling sweep-and-sleep sensor systems",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "131--133",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453207",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper describes a self-assembling sleep-wake
sensor system that is scalable, easily implemented, and
energy conserving. Sensors actively detecting events
form wave fronts that sweep the sensor field. An
application of concepts from cellular automata theory
accounts for much of its novelty. The system has
additional, highly desirable properties such as a
self-healing capability, fault tolerance, asynchronous
operation, seamless accommodation of obstacles in the
sensor field, and it is highly effective even in the
case of intelligent intruders, i.e., those who know
sensor design and sensor locations. System performance
is a focus of the paper, and, as in the study of the
emergent behavior of cellular automata, an instructive
example of experimental mathematics. Related open
questions in mathematical performance analysis are
reviewed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2008:CCO,
author = "Giuliano Casale",
title = "{CoMoM}: class-oriented evaluation of multiclass
models",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "134--136",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453208",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dieker:2008:COF,
author = "A. B. Dieker and S. Ghosh and M. S. Squillante",
title = "Capacity optimization in feedforward {Brownian}
networks",
journal = j-SIGMETRICS,
volume = "36",
number = "2",
pages = "137--139",
month = sep,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1453175.1453209",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:31:09 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Haverkort:2008:QAG,
author = "Boudewijn R. Haverkort and Markus Siegle and Maarten
van Steen",
title = "Quantitative analysis of gossiping protocols",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "2--2",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481508",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Katoen:2008:HMA,
author = "Joost-Pieter Katoen",
title = "How to model and analyze gossiping protocols?",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "3--6",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481509",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crouzen:2008:AFM,
author = "Pepijn Crouzen and Jaco van de Pol and Arend Rensink",
title = "Applying formal methods to gossiping networks with
{mCRL} and groove",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "7--16",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481510",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we explore the practical possibilities
of using formal methods to analyze gossiping networks.
                 In particular, we use $\mu$CRL and Groove to model
the peer sampling service, and analyze it through a
series of model transformations to CTMCs and finally
MRMs. Our tools compute the expected value of various
network quality indicators, such as average path
lengths, over all possible system runs. Both transient
and steady state analysis are supported. We compare our
results with the simulation and emulation results found
in [10].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kwiatkowska:2008:AGP,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "Analysis of a gossip protocol in {PRISM}",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "17--22",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481511",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gossip protocols have been proposed as a robust and
efficient method for disseminating information
throughout dynamically changing networks. We present an
analysis of a gossip protocol using probabilistic model
checking and the tool PRISM. Since the behaviour of
these protocols is both probabilistic and
nondeterministic in nature, this provides a good
example of the exhaustive, quantitative analysis that
probabilistic model checking techniques can provide. In
particular, we compute minimum and maximum values,
representing the best- and worst-case performance of
the protocol under any scheduling, and investigate both
their relationship with the average values that would
be obtained through simulation and the precise
scheduling which achieve these values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krieger:2008:VPM,
author = "Thomas Krieger and Martin Riedl and Johann Schuster
and Markus Siegle",
title = "A view-probability-matrix approach to the modelling of
gossiping protocols",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "23--30",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481512",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses the quantitative analysis of
gossiping protocols. In contrast to existing approaches
which are entirely based on the simulation of the
individual nodes' behaviours, we present a new approach
based on summary stochastic models for the peer
sampling service. Instead of an ordinary state- and
transition-based model, a matrix-based approach is
presented. Starting from a basic model with static node
population and without ageing of neighbourhood
information, refinements of the model are presented
which enable the modelling of ageing and dynamic
population. The paper also contains some experimental
results for the different models introduced in the
paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bakhshi:2008:MAE,
author = "Rena Bakhshi and Lucia Cloth and Wan Fokkink and
Boudewijn R. Haverkort",
  title =        "Mean-field analysis for the evaluation of gossip
protocols",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "31--39",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481513",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Gossip protocols are designed to operate in very
large, decentralised networks. A node in such a network
bases its decision to interact (gossip) with another
node on its partial view of the global system. Because
of the size of these networks, analysis of gossip
protocols is mostly done using simulation, which tend
to be expensive in computation time and memory
consumption.\par
We introduce mean-field analysis as an analytical
method to evaluate gossip protocols. Nodes in the
network are represented by small identical stochastic
models. Joining all nodes would result in an enormous
stochastic process. If the number of nodes goes to
infinity, however, mean-field analysis allows us to
replace this intractably large stochastic process by a
small deterministic process. This process approximates
the behaviour of very large gossip networks, and can be
evaluated using simple matrix-vector multiplications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Estrada:2008:DEM,
author = "Trilce Estrada and Olac Fuentes and Michela Taufer",
title = "A distributed evolutionary method to design scheduling
policies for volunteer computing",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "40--49",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481515",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Volunteer Computing (VC) is a paradigm that takes
advantage of idle cycles from computing resources
donated by volunteers and connected through the
Internet to compute large-scale, loosely coupled
simulations. A big challenge in VC projects is the
scheduling of work-units across heterogeneous,
volatile, and error-prone computers. The design of
efficient scheduling policies for VC projects involves
subjective and time-demanding tuning that is driven by
knowledge of the project designer. VC projects are in
need of a faster and project-independent method to
automate the scheduling design.\par
To automatically generate a scheduling policy, we must
explore the extremely large space of syntactically
valid policies. Given the size of this search space,
exhaustive search is not feasible. Thus in this paper
we propose to solve the problem using an evolutionary
method to automatically generate a set of scheduling
policies that are project-independent, minimize errors,
and maximize throughput in VC projects. Our method
includes a genetic algorithm where the representation
of individuals, the fitness function, and the genetic
operators are specifically tailored to get effective
policies in a short time. The effectiveness of our
method is evaluated with SimBA, a Simulator of BOINC
Applications. In contrast with manually designed
scheduling policies that often perform well only for
the specific project they were designed for and require
months of tuning, our resulting scheduling policies
provide better overall throughput across the different
VC projects considered in this work and were generated
by our method in a time window of one week.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed systems; genetic algorithms; global
computing; volatile systems",
}
@Article{Eddy:2008:BPI,
author = "Wesley M. Eddy",
title = "Basic properties of the {IPv6} {AS}-level topology",
journal = j-SIGMETRICS,
volume = "36",
number = "3",
pages = "50--57",
month = dec,
year = "2008",
CODEN = "????",
DOI = "https://doi.org/10.1145/1481506.1481516",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:25 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Several well-known measurement studies have revealed
aspects of the Internet's AS-level and router-level
topologies, and derived a few important properties.
This has yielded graph models and parameter ranges that
allow for greater confidence in simulation of new
protocols as well as a deeper understanding of the
Internet's structure and similarity to other types of
technological, biological, economic, and social
networks. The majority of Internet topology studies
have been focused on the IPv4 portion of the Internet,
and at this time relatively few observations of the
Internet's IPv6 topology have been published. In this
report, we use over three years of data gathered in the
Route Views archives to describe some basic properties
of the IPv6 AS-level topology. We find similarities
with the IPv4 AS graph in several regards, including
the small-world nature of the graph. We also find some
interesting differences, including the values of the
graph's diameter and the criticality of a few
well-connected nodes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2009:SIT,
author = "Giuliano Casale and Richard R. Muntz and Giuseppe
Serazzi",
title = "Special issue on tools for computer performance
modeling and reliability analysis",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "2--3",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baarir:2009:GTR,
author = "Soheib Baarir and Marco Beccuti and Davide Cerotti and
Massimiliano De Pierro and Susanna Donatelli and
Giuliana Franceschinis",
title = "The {GreatSPN} tool: recent enhancements",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "4--9",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530876",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "GreatSPN is a tool that supports the design and the
qualitative and quantitative analysis of Generalized
Stochastic Petri Nets (GSPN) and of Stochastic
Well-Formed Nets (SWN). The very first version of
GreatSPN saw the light in the late eighties of last
                 century: since then two main releases were developed
and widely distributed to the research community:
GreatSPN1.7 [13], and GreatSPN2.0 [8]. This paper
reviews the main functionalities of GreatSPN2.0 and
presents some recently added features that
significantly enhance the efficacy of the tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bertoli:2009:JPE,
author = "Marco Bertoli and Giuliano Casale and Giuseppe
Serazzi",
title = "{JMT}: performance engineering tools for system
modeling",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "10--15",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present the Java Modelling Tools (JMT) suite, an
integrated framework of Java tools for performance
evaluation of computer systems using queueing models.
The suite offers a rich user interface that simplifies
the definition of performance models by means of wizard
dialogs and of a graphical design workspace.\par
The performance evaluation features of JMT span a wide
range of state-of-the-art methodologies including
discrete-event simulation, mean value analysis of
product-form networks, analytical identification of
bottleneck resources in multiclass environments, and
workload characterization with fuzzy clustering. The
discrete-event simulator supports several advanced
modeling features such as finite capacity regions,
load-dependent service times, bursty processes,
fork-and-join nodes, and implements spectral estimation
for analysis of simulative results. The suite is
open-source, released under the GNU general public
license (GPL), and it is available for free download
at: http://jmt.sourceforge.net.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gaonkar:2009:PDM,
author = "Shravan Gaonkar and Ken Keefe and Ruth Lamprecht and
Eric Rozier and Peter Kemper and William H. Sanders",
title = "Performance and dependability modeling with
{M{\"o}bius}",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "16--21",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530878",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "M{\"o}bius is a multi-paradigm multi-solution
framework to describe and analyze stochastic models of
discrete-event dynamic systems. M{\"o}bius is widely
used in academia and industry for the performance and
dependability assessment of technical systems. It comes
with a design of experiments as well as automated
support for distributing a series of simulation
experiments over a network to support the exploration
of design spaces for real-world applications. In
addition to that, the M{\"o}bius simulator interfaces
with Traviando, a separate trace analyzer and
visualizer that helps to investigate the details of a
complex model for validation, verification, and
debugging purposes. In this paper, we outline the
development of a multi-formalism model of a Lustre-like
file system, the analysis of its detailed simulated
behavior, and the results obtained from a simulation
study.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arns:2009:OTO,
author = "Markus Arns and Peter Buchholz and Dennis M{\"u}ller",
title = "{OPEDo}: a tool for the optimization of performance
and dependability models",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "22--27",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530879",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "OPEDo is a software tool for the optimization of
discrete event systems according to performance or
dependability measures. The tool can be seen as an add
on to various tools for performance and dependability
analysis. The goal of OPEDo is to provide a wide
variety of optimization algorithms for complex black
box functions as they are required for the model based
optimization of discrete event systems using
analytically tractable models or simulation models. The
paper introduces the software architecture of the tool,
gives a brief sketch of the integrated optimization
algorithms and presents several examples.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tribastone:2009:PEP,
author = "Mirco Tribastone and Adam Duguid and Stephen Gilmore",
title = "The {PEPA Eclipse} plugin",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "28--33",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The PEPA Eclipse Plug-in supports the creation and
analysis of performance models, from small-scale Markov
models to large-scale simulation studies and
differential equation systems. Whichever form of
analysis is used, models are expressed in a single
                 high-level language for quantitative modelling,
Performance Evaluation Process Algebra (PEPA).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dingle:2009:PTP,
author = "Nicholas J. Dingle and William J. Knottenbelt and
Tamas Suto",
title = "{PIPE2}: a tool for the performance evaluation of
generalised stochastic {Petri Nets}",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "34--39",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530881",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents an overview of
Platform-Independent Petri Net Editor 2 (PIPE2), an
open-source tool that supports the design and analysis
of Generalised Stochastic Petri Net (GSPN) models.
                 PIPE2's extensible design enables developers to add
functionality via pluggable analysis modules. It also
acts as a front-end for a parallel and distributed
performance evaluation environment. With PIPE2, users
are able to design and evaluate performance queries
expressed in the Performance Tree formalism.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "GSPNs; parallel and distributed computing; performance
trees; PIPE2; stochastic modelling",
}
@Article{Kwiatkowska:2009:PPM,
author = "Marta Kwiatkowska and Gethin Norman and David Parker",
title = "{PRISM}: probabilistic model checking for performance
and reliability analysis",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "40--45",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530882",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Probabilistic model checking is a formal verification
technique for the modelling and analysis of stochastic
systems. It has proved to be useful for studying a wide
range of quantitative properties of models taken from
many different application domains. This includes, for
example, performance and reliability properties of
computer and communication systems. In this paper, we
give an overview of the probabilistic model checking
tool PRISM, focusing in particular on its support for
continuous-time Markov chains and Markov reward models,
and how these can be used to analyse performability
properties.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kounev:2009:QPM,
author = "Samuel Kounev and Christofer Dutz",
title = "{QPME}: a performance modeling tool based on queueing
{Petri Nets}",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "46--51",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530883",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Queueing Petri nets are a powerful formalism that can
be exploited for modeling distributed systems and
analyzing their performance and scalability. By
combining the modeling power and expressiveness of
queueing networks and stochastic Petri nets, queueing
Petri nets provide a number of advantages. In this
paper, we present QPME (Queueing Petri net Modeling
Environment) --- a tool that supports the modeling and
analysis of systems using queueing Petri nets. QPME
provides an Eclipse-based editor for designing queueing
Petri net models and a powerful simulation engine for
analyzing the models. After presenting the tool, we
discuss the ongoing work on the QPME project and the
planned future enhancements of the tool.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Trivedi:2009:SAT,
  author =       "Kishor S. Trivedi and Robin Sahner",
title = "{SHARPE} at the age of twenty two",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "52--57",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530884",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper discusses the modeling tool called SHARPE
(Symbolic Hierarchical Automated Reliability and
Performance Evaluator), a general hierarchical modeling
tool that analyzes stochastic models of reliability,
availability, performance, and performability. It
allows the user to choose the number of levels of
models, the type of model at each level, and which
results from each model level are to act as which
parameters in which higher-level models. SHARPE
includes algorithms for analysis of fault trees,
reliability block diagrams, acyclic series-parallel
graphs, acyclic and cyclic Markov and semi-Markov
models, generalized stochastic Petri nets, and closed
single- and multi-chain product-form queueing networks.
For many of these, the user can choose among
alternative algorithms, and can decide whether to get a
result in the form of a distribution function (symbolic
in the time variable) or as a mean or probability.
SHARPE has been useful to students, practicing
engineers, and researchers. In this paper we discuss
the history of SHARPE, give some examples of its use,
and talk about some lessons learned.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciardo:2009:AFS,
author = "Gianfranco Ciardo and Andrew S. Miner and Min Wan",
title = "Advanced features in {SMART}: the stochastic model
checking analyzer for reliability and timing",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "58--63",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530885",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We describe some of the advanced features of the
software tool SmArT, the Stochastic Model checking
Analyzer for Reliability and Timing. Initially
conceived as a software package for numerical solution
and discrete-event simulation of stochastic models,
SmArT now also provides powerful model-checking
capabilities, thanks to its extensive use of various
forms of decision diagrams, which in turn also greatly
increase the efficiency of its stochastic analysis
algorithms. These aspects make it an excellent choice
when tackling systems with extremely large state
spaces.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:2009:TIM,
author = "Edmundo {de Souza e Silva} and Daniel R. Figueiredo
and Rosa M. M. Le{\~a}o",
  title =        "The {TANGRAM-II} integrated modeling environment for
computer systems and networks",
journal = j-SIGMETRICS,
volume = "36",
number = "4",
pages = "64--69",
month = mar,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1530873.1530886",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:32:42 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The TANGRAM-II tool has been developed aiming at
supporting the performance analyst throughout the
entire modeling process, from model construction and
model solution to experimentation. The tool has a
powerful user interface that can be tailored to
specific problem domain, it includes a rich set of
analytic solution techniques, distinct options for
obtaining the measures of interest, a hybrid fluid and
event driven simulator, visualization features to
follow the model's evolution with time, traffic
generators and active measurement techniques to assist
the user in performing computer networking
experimentation. These and additional characteristics
make TANGRAM-II a unique tool for research and
education.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lelarge:2009:ECE,
author = "Marc Lelarge",
title = "Efficient control of epidemics over random networks",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "1--12",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555351",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555351",
abstract = "Motivated by the modeling of the spread of viruses or
epidemics with coordination among agents, we introduce
a new model generalizing both the basic contact model
and the bootstrap percolation. We analyze this
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pathak:2009:BSC,
author = "Abhinav Pathak and Feng Qian and Y. Charlie Hu and Z.
Morley Mao and Supranamaya Ranjan",
title = "Botnet spam campaigns can be long lasting: evidence,
implications, and analysis",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "13--24",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555352",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555352",
abstract = "Accurately identifying spam campaigns launched by a
large number of bots in a botnet allows for accurate
spam campaign signature generation and hence is
critical to defeating spamming botnets. \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Torres:2009:IUB,
author = "Ruben D. Torres and Mohammad Y. Hajjat and Sanjay G.
Rao and Marco Mellia and Maurizio M. Munafo",
title = "Inferring undesirable behavior from {P2P} traffic
analysis",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "25--36",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555353",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555353",
abstract = "While peer-to-peer (P2P) systems have emerged in
popularity in recent years, their large-scale and
complexity make them difficult to reason about. In this
paper, we argue that systematic analysis of traffic
characteristics \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anand:2009:RNT,
author = "Ashok Anand and Chitra Muthukrishnan and Aditya Akella
and Ramachandran Ramjee",
title = "Redundancy in network traffic: findings and
implications",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "37--48",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555355",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555355",
abstract = "A large amount of popular content is transferred
repeatedly across network links in the Internet. In
recent years, protocol-independent redundancy
elimination, which can remove duplicate strings from
within arbitrary \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jin:2009:UCN,
author = "Yu Jin and Esam Sharafuddin and Zhi-Li Zhang",
title = "Unveiling core network-wide communication patterns
through application traffic activity graph
decomposition",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "49--60",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555356",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555356",
abstract = "As Internet communications and applications become
                 more complex, operating, managing and securing networks
have become increasingly challenging tasks. There are
urgent demands for more sophisticated techniques for
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramasubramanian:2009:TIL,
author = "Venugopalan Ramasubramanian and Dahlia Malkhi and
Fabian Kuhn and Mahesh Balakrishnan and Archit Gupta
and Aditya Akella",
title = "On the treeness of {Internet} latency and bandwidth",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "61--72",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555357",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555357",
abstract = "Existing empirical studies of Internet structure and
path properties indicate that the Internet is
tree-like. This work quantifies the degree to which at
least two important Internet measures--latency
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Meiners:2009:TTA,
author = "Chad R. Meiners and Alex X. Liu and Eric Torng",
title = "Topological transformation approaches to optimizing
{TCAM}-based packet classification systems",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "73--84",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555359",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555359",
abstract = "Several range reencoding schemes have been proposed to
mitigate the effect of range expansion and the
limitations of small capacity, large power consumption,
and high heat generation of TCAM-based packet
classification \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shen:2009:RDP,
author = "Kai Shen and Christopher Stewart and Chuanpeng Li and
Xin Li",
title = "Reference-driven performance anomaly identification",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "85--96",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555360",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555360",
abstract = "Complex system software allows a variety of execution
conditions on system configurations and workload
properties. This paper explores a principled use of
reference executions--those of similar execution
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2009:NWS,
author = "Gagan R. Gupta and Sujay Sanghavi and Ness B. Shroff",
title = "Node weighted scheduling",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "97--108",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555361",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555361",
abstract = "This paper proposes a new class of online policies for
scheduling in input-buffered crossbar switches. Given
an initial configuration of packets at the input
buffers, these policies drain all packets in the system
in the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chaintreau:2009:AGS,
author = "Augustin Chaintreau and Jean-Yves {Le Boudec} and
Nikodin Ristanovic",
title = "The age of gossip: spatial mean field regime",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "109--120",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555363",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555363",
abstract = "Disseminating a piece of information, or updates for a
piece of information, has been shown to benefit greatly
from simple randomized procedures, sometimes referred
to as gossiping, or epidemic algorithms. Similarly, in
a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bodas:2009:SMC,
author = "Shreeshankar Bodas and Sanjay Shakkottai and Lei Ying
and R. Srikant",
title = "Scheduling in multi-channel wireless networks: rate
function optimality in the small-buffer regime",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "121--132",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555364",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555364",
abstract = "We consider the problem of designing scheduling
algorithms for the downlink of cellular wireless
networks where bandwidth is partitioned into tens to
hundreds of parallel channels, each of which can be
allocated \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rajagopalan:2009:NAT,
author = "Shreevatsa Rajagopalan and Devavrat Shah and Jinwoo
Shin",
title = "Network adiabatic theorem: an efficient randomized
protocol for contention resolution",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "133--144",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555365",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555365",
abstract = "The popularity of Aloha-like algorithms for
resolution of contention between multiple entities
accessing common resources is due to their extreme
simplicity and distributed nature. Example applications
of such algorithms \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharma:2009:DDC,
author = "Abhishek B. Sharma and Leana Golubchik and Ramesh
Govindan and Michael J. Neely",
title = "Dynamic data compression in multi-hop wireless
networks",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "145--156",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555367",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555367",
abstract = "Data compression can save energy and increase network
capacity in wireless sensor networks. However, the
decision of whether and when to compress data can
depend upon platform hardware, topology, wireless
channel \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2009:OPA,
author = "Anshul Gandhi and Mor Harchol-Balter and Rajarshi Das
and Charles Lefurgy",
title = "Optimal power allocation in server farms",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "157--168",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555368",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555368",
abstract = "Server farms today consume more than 1.5\% of the
total electricity in the U.S. at a cost of nearly \$4.5
billion. Given the rising cost of energy, many
industries are now seeking solutions for how to best
make use of their \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coskun:2009:EIJ,
author = "Ayse K. Coskun and Richard Strong and Dean M. Tullsen
and Tajana Simunic Rosing",
title = "Evaluating the impact of job scheduling and power
management on processor lifetime for chip
multiprocessors",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "169--180",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555369",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555369",
abstract = "Temperature-induced reliability issues are among the
major challenges for multicore architectures. Thermal
hot spots and thermal cycles combine to degrade
reliability. This research presents new
reliability-aware job \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2009:UIC,
author = "Feng Chen and David A. Koufaty and Xiaodong Zhang",
title = "Understanding intrinsic characteristics and system
implications of flash memory based solid state drives",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "181--192",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555371",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555371",
abstract = "Flash Memory based Solid State Drive (SSD) has been
called a ``pivotal technology'' that could
revolutionize data storage systems. Since SSD shares a
common interface with the traditional hard disk drive
(HDD), both physically \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schroeder:2009:DEW,
author = "Bianca Schroeder and Eduardo Pinheiro and
Wolf-Dietrich Weber",
title = "{DRAM} errors in the wild: a large-scale field study",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "193--204",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555372",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555372",
abstract = "Errors in dynamic random access memory (DRAM) are a
common form of hardware failure in modern compute
clusters. Failures are costly both in terms of hardware
replacement costs and service disruption. While a
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mi:2009:RUI,
author = "Ningfang Mi and Alma Riska and Xin Li and Evgenia
Smirni and Erik Riedel",
title = "Restrained utilization of idleness for transparent
scheduling of background tasks",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "205--216",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555373",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555373",
abstract = "A common practice in system design is to treat
features intended to enhance performance and
reliability as low priority tasks by scheduling them
during idle periods, with the goal to keep these
features transparent to the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2009:NSB,
author = "Yi Wang and Michael Schapira and Jennifer Rexford",
title = "Neighbor-specific {BGP}: more flexible routing
policies while improving global stability",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "217--228",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555375",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555375",
abstract = "The Border Gateway Protocol (BGP) offers network
administrators considerable flexibility in controlling
how traffic flows through their networks. However, the
interaction between routing policies in different
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Laoutaris:2009:DTB,
author = "Nikolaos Laoutaris and Georgios Smaragdakis and Pablo
Rodriguez and Ravi Sundaram",
title = "Delay tolerant bulk data transfers on the {Internet}",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "229--238",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555376",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555376",
abstract = "Many emerging scientific and industrial applications
require transferring multiple Tbytes of data on a daily
basis. Examples include pushing scientific data from
particle accelerators/colliders to laboratories around
the \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jiang:2009:CCD,
author = "Wenjie Jiang and Rui Zhang-Shen and Jennifer Rexford
and Mung Chiang",
title = "Cooperative content distribution and traffic
engineering in an {ISP} network",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "239--250",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555377",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555377",
abstract = "Traditionally, Internet Service Providers (ISPs) make
profit by providing Internet connectivity, while
content providers (CPs) play the more lucrative role of
delivering content to users. As network connectivity is
increasingly a \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cohen:2009:LDS,
author = "Edith Cohen and Haim Kaplan",
title = "Leveraging discarded samples for tighter estimation of
multiple-set aggregates",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "251--262",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555379",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555379",
abstract = "Many datasets, including market basket data, text or
hypertext documents, and events recorded in different
locations or time periods, can be modeled as a
collection of sets over a ground set of keys. Common
queries \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Loiseau:2009:MLE,
author = "Patrick Loiseau and Paulo Gon{\c{c}}alves and
St{\'e}phane Girard and Florence Forbes and Pascale
Vicat-Blanc Primet",
title = "Maximum likelihood estimation of the flow size
distribution tail index from sampled packet data",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "263--274",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555380",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555380",
abstract = "In the context of network traffic analysis, we address
the problem of estimating the tail index of flow (or
more generally of any group) size distribution from the
observation of a sampled population of packets
\ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Qiu:2009:MCP,
author = "Tongqing Qiu and Zihui Ge and Seungjoon Lee and Jia
Wang and Qi Zhao and Jun Xu",
title = "Modeling channel popularity dynamics in a large {IPTV}
system",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "275--286",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555381",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555381",
abstract = "Understanding the channel popularity or content
popularity is an important step in the workload
characterization for modern information distribution
systems (e.g., World Wide Web, peer-to-peer
file-sharing systems, \ldots{}).",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Harchol-Balter:2009:SRT,
author = "Mor Harchol-Balter and Alan Scheller-Wolf and Andrew
R. Young",
title = "Surprising results on task assignment in server farms
with high-variability workloads",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "287--298",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555383",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555383",
abstract = "This paper investigates the performance of task
assignment policies for server farms, as the
variability of job sizes (service demands) approaches
infinity. Our results reveal that some common wisdoms
regarding task \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sandholm:2009:MOU,
author = "Thomas Sandholm and Kevin Lai",
title = "{MapReduce} optimization using regulated dynamic
prioritization",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "299--310",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555384",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555384",
abstract = "We present a system for allocating resources in shared
data and compute clusters that improves MapReduce job
scheduling in three ways. First, the system uses
regulated and user-assigned priorities to offer
different \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2009:SAA,
author = "Varun Gupta and Mor Harchol-Balter",
title = "Self-adaptive admission control policies for
resource-sharing systems",
journal = j-SIGMETRICS,
volume = "37",
number = "1",
pages = "311--322",
month = jun,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/2492101.1555385",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Wed Jun 8 06:55:47 MDT 2022",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
URL = "https://dl.acm.org/doi/10.1145/2492101.1555385",
abstract = "We consider the problem of admission control in
resource sharing systems, such as web servers and
transaction processing systems, when the job size
distribution has high variability, with the aim of
minimizing the mean \ldots{}",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Scheuermann:2009:WSS,
author = "Bj{\"o}rn Scheuermann and Wolfgang Kiess",
title = "Who said that?: the send-receive correlation problem
in network log analysis",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "3--5",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639564",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When analyzing packet log files from network
experiments, the question which received packet belongs
to which send event arises. If non-unique (i.e., binary
identical) transmissions have occurred, this
send-receive correlation problem can become very
challenging. We discuss this problem in the case of
networks with local broadcast media, and outline first
directions how it can be solved.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anandkumar:2009:SRM,
author = "Animashree Anandkumar and Chatschik Bisdikian and Ting
He and Dakshi Agrawal",
title = "Selectively retrofitting monitoring in distributed
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "6--8",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639565",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current distributed systems carry legacy subsystems
lacking sufficient instrumentation for monitoring the
end-to-end business transactions supported by these
systems. In the absence of instrumentation, only
probabilistic monitoring is possible by using
time-stamped log-records. Retrofitting these systems
with expensive monitoring instrumentation provides
high-granularity, precise tracking of transactions.
Given a limited budget, local instrumentation
strategies which maximize the effectiveness of
monitoring transactions throughout the system are
proposed. The operation of the end-to-end system is
modeled by a queuing network; each queue represents a
subsystem which produces time-stamped log-records as
transactions pass through it. Two simple heuristics for
instrumentation are proposed which become optimal under
certain conditions. One heuristic selects states in the
transition diagram for local instrumentation in the
decreasing order of the load factors of their queues.
Sufficient conditions for this load-factor heuristic to
be optimal are proven using the notion of stochastic
order. The other heuristic selects states in the
transition diagram based on the approximated tracking
accuracy of probabilistic monitoring at each state,
which is shown to be tight at low arrival rates.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bipartite matching; probabilistic transaction
monitoring; queuing networks; stochastic comparison",
}
@Article{Dubey:2009:PMD,
author = "Abhishek Dubey and Rajat Mehrotra and Sherif
Abdelwahed and Asser Tantawi",
title = "Performance modeling of distributed multi-tier
enterprise systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "9--11",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639566",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2009:DCR,
author = "Chao Wang and Xiaoli Ma",
title = "Deriving {Cram{\'e}r--Rao} bounds and maximum
likelihood estimators for traffic matrix inference",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "12--14",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639567",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Traffic matrix estimation has caught numerous
attentions these days due to its importance on network
management tasks such as traffic engineering and
capacity planning for Internet Service Providers (ISP).
Various estimation models and methods have been
proposed to estimate the traffic matrix. However, it is
difficult to compare these methods since they adopt
different model assumptions. Currently most evaluations
are based on some particular realization of data. We
propose to use the (Bayesian) Cram{\'e}r--Rao Bound
(CRB) as a benchmark on these estimators. We also
derive the maximum likelihood estimator (MLE) for
certain models. With coupled mean and variance, our
simulations show that the least squares (LS) estimator
reaches the CRB asymptotically, while the MLEs are
difficult to calculate when the dimension is high.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krioukov:2009:GFS,
author = "Dmitri Krioukov and Fragkiskos Papadopoulos and
Mari{\'a}n Bogu{\~n}{\'a} and Amin Vahdat",
title = "Greedy forwarding in scale-free networks embedded in
hyperbolic metric spaces",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "15--17",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639568",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cho:2009:BTB,
author = "Jeong-woo Cho and Yuming Jiang",
title = "Basic theorems on the backoff process in 802.11",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "18--20",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639569",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nair:2009:OJF,
author = "Jayakrishnan Nair and Steven H. Low",
title = "Optimal job fragmentation",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "21--23",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639570",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2009:EAL,
author = "Erlin Yao and Yungang Bao and Guangming Tan and Mingyu
Chen",
title = "Extending {Amdahl's Law} in the multicore era",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "24--26",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639571",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korzun:2009:LEM,
author = "Dmitry Korzun and Andrei Gurtov",
title = "A local equilibrium model for {P2P} resource ranking",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "27--29",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639572",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many Peer-to-Peer (P2P) systems rely on cooperation
among nodes that should be supported with incentives.
Introducing ranks into P2P designs could reward
cooperating nodes and increase overall system
performance. In this paper, we consider the problem of
P2P ranking. In a P2P resource sharing system (RSS),
the ranks allow a node to decide which sources to keep
locally, which external resources to download and
through which nodes, what control to apply for transit
resource requests, and how much quality of service
(QoS) to provide. We introduce a mathematical model for
local P2P resource ranking that optimizes these
decisions. Complete proofs can be found in our
technical report.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2009:MCAa,
author = "Daniel Sadoc Menasch{\'e} and Antonio A. Arag{\~a}o
Rocha and Edmundo {de Souza e Silva} and Rosa M. Meri
Le{\~a}o and Don Towsley and Arun Venkataramani",
title = "Modeling chunk availability in {P2P} swarming
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "30--32",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639573",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hohlfeld:2009:VIV,
author = "Oliver Hohlfeld and Florin Ciucu",
title = "Viewing impaired video transmissions from a modeling
perspective",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "33--35",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639574",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2009:WOS,
author = "Gagan R. Gupta and Sujay Sanghavi and Ness B. Shroff",
title = "Workload optimality in switches without arrivals",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "36--38",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639575",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze a switch with cross-bar constraints.
Beginning with an initial loading and no further
arrivals, we provide necessary conditions for a
scheduling policy to minimize the workload at all
times. We show that these conditions are sufficient for
a switch of size $N \times 3$ or smaller. We then consider a
weaker notion of optimality: cumulative average
workload optimality. Using a counterexample for a $7
\times 7$ switch, we show that it is not possible to
approximate the cumulative average workload within
(1+4/475) of the optimal at all times. We conjecture
that the workload under the MVM policy is within twice
of the optimal at all times.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Andrew:2009:OSS,
author = "Lachlan L. H. Andrew and Adam Wierman and Ao Tang",
title = "Optimal speed scaling under arbitrary power
functions",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "39--41",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639576",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates the performance of online
dynamic speed scaling algorithms for the objective of
minimizing a linear combination of energy and response
time. We prove that (SRPT, {\em P\/}$^{-1}$ ({\em
n\/})), which uses Shortest Remaining Processing Time
(SRPT) scheduling and processes at speed such that the
power used is equal to the queue length, is
2-competitive for a very wide class of power-speed
tradeoff functions. Further, we prove that there exist
tradeoff functions such that no online algorithm can
attain a competitive ratio less than 2.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Verloop:2009:HTA,
author = "I. M. Verloop and U. Ayesta and R.
N{\'u}{\~n}ez-Queija",
title = "Heavy-traffic analysis of the {M\slash PH\slash 1}
discriminatory processor sharing queue with
phase-dependent weights",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "42--44",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639577",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We analyze a generalization of the Discriminatory
Processor Sharing (DPS) queue in a heavy-traffic
setting. Customers present in the system are served
simultaneously at rates controlled by a vector of
weights. We assume phase-type distributed service
requirements and allow that customers have different
weights in various phases of their service. We
establish a state-space collapse for the queue length
vector in heavy traffic. The result shows that in the
limit, the queue length vector is the product of an
exponentially distributed random variable and a
deterministic vector. This generalizes a previous
result by [2] who considered a DPS queue with
exponentially distributed service requirements. We
finally discuss some implications for residual service
requirements and monotonicity properties in the
ordinary DPS model.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anselmi:2009:IAS,
author = "J. Anselmi and Y. Lu and M. Sharma and M. S.
Squillante",
title = "Improved Approximations for Stochastic Loss Networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "45--47",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639578",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Weingartner:2009:TAI,
author = "Elias Weing{\"a}rtner and Florian Schmidt and Tobias
Heer and Klaus Wehrle",
title = "Time accurate integration of software prototypes with
event-based network simulations",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "49--50",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639580",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The concept of network emulation brings together the
flexibility of network simulations and the accuracy of
real-world prototype implementations. However, this
approach suffers from the fundamental problem of
simulation overload which occurs if the simulation is
not able to execute in real-time. We tackle this
problem with a concept we call Synchronized Network
Emulation. It enables the time accurate integration of
implementations with network simulations of any
complexity.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2009:ETC,
author = "Haifeng Chen and Wenxuan Zhang and Guofei Jiang",
title = "Experience transfer for the configuration tuning in
large scale computing systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "51--52",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639581",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "configuration tuning; distributed systems; knowledge
acquisition; knowledge reuse",
}
@Article{Lin:2009:RID,
author = "Bill Lin and Jun (Jim) Xu and Nan Hua and Hao Wang and
Haiquan (Chuck) Zhao",
title = "A randomized interleaved {DRAM} architecture for the
maintenance of exact statistics counters",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "53--54",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639582",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We extend a previously proposed randomized interleaved
DRAM architecture [1] that can maintain wirespeed
updates (say 40 Gb/s) to a large array (say millions)
of counters. It works by interleaving updates to
randomly distributed counters across multiple memory
banks. Though unlikely, an adversary can conceivably
overload a memory bank by triggering frequent updates
to the same counter. In this work, we show this
``attack'' can be mitigated through caching pending
updates, which can catch repeated updates to the same
counter within a sliding time window. While this
architecture of combining randomization with caching is
simple and straightforward, the primary contribution of
this work is to rigorously prove that it can handle
with overwhelming probability all adversarial update
patterns, using a combination of tail bound techniques,
convex ordering theory, and queueing analysis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "convex ordering; statistics counters; tail bound",
}
@Article{Zhao:2009:MPA,
author = "Bridge Zhao and Y. K. Li and John C. S. Lui and
Dah-Ming Chiu",
title = "On modeling product advertisement in social networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "55--56",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639583",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Advertising via social networks is receiving more
attention these days. Given a fixed investment (e.g.,
free samples), a company needs to find out the final
probability that users will purchase the product. In
this paper we characterize and model various influence
mechanisms that govern the word-of-mouth spread of
advertisements in large social networks. We use the
local mean field (LMF) technique to analyze large scale
networks wherein states of nodes can be changed by
various influence mechanisms. Extensive simulations are
carried out to validate the accuracy of our model, and
the results also provide insights on designing
advertising strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "economics; epidemics; influence model; local mean
field",
}
@Article{Zahn:2009:ESF,
author = "Thomas Zahn and Greg O'Shea and Antony Rowstron",
title = "An empirical study of flooding in mesh networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "57--58",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639584",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Triukose:2009:CDN,
author = "Sipat Triukose and Zhihua Wen and Michael Rabinovich",
title = "Content delivery networks: how big is big enough?",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "59--60",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639585",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The central question addressed in this paper is
whether a content delivery network (CDN) needs to deploy
its servers in a large number of locations to achieve
its current levels of performance. Our study indicates
that a relatively small number of consolidated data
centers might provide similar performance to end-users.
or over 30\% of the total 34,000 servers claimed by
Akamai during the study period, were pingable.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2009:SFM,
author = "Zhibin Yu and Hai Jin",
title = "Simple and fast micro-architecture simulation: a
trisection {Cantor} fractal approach",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "61--62",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639586",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the prohibitively long time when detailedly
simulating a realistic benchmark to its completion,
sampling is frequently used to reduce the simulation
time. However, it may often require profiling or
iterative simulations to determine the sampling
parameters. This paper employs the generation procedure
of trisection Cantor set, one classic fractal, to
select instructions simulated in detail as an approach
to enable a simple and fast micro-architecture
simulation. Randomly selected six benchmarks from SPEC
CPU2000 are tested on the simulator, CantorSim, which
implements the trisection Cantor fractal approach. The
results show that it is very easy to use this approach
and it can achieve actual average acceleration of
23.4\% over SMARTS [3] while the accuracy only reduces
marginally. CantorSim's accuracy is validated against
the sim-outorder and is accurate in a 3.2\% error
margin. Similar CPI relative errors with the same
parameter values of experiments on simulators of
different processor models indicate that this approach
is micro-architecture independent and can be applied to
well predict the performance of new micro-architecture
design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cantor set; cycle-accurate simulation; fractal
geometry; micro-architecture simulator; performance
evaluation",
}
@Article{Key:2009:RGE,
author = "Peter Key and Alexandre Proutiere",
title = "Routing games with elastic traffic",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "63--64",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639587",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we introduce and investigate a novel
class of multipath routing games with elastic traffic.
Users open one or more connections along different
feasible paths from source to destination and act
selfishly--seeking to transfer data as fast as
possible. Users only control their routing choices,
and once these choices have been made, the connection
rates are elastic and determined via congestion control
algorithms (e.g. TCP) which ultimately maximize a
certain notion of the network utility. We analyze the
existence and the performance of the Nash Equilibria
(NEs) of the resulting routing games.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lange:2009:ESI,
author = "John R. Lange and J. Scott Miller and Peter A. Dinda",
title = "{EmNet}: satisfying the individual user through
empathic home networks: summary",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "65--66",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639588",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "empathic systems; human factors; networks",
}
@Article{Riska:2009:EDL,
author = "Alma Riska and Erik Riedel",
title = "Evaluation of disk-level workloads at different time
scales",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "67--68",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639589",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reddy:2009:MDC,
author = "Vinith Reddy and Younghoon Kim and Srinivas Shakkottai
and A. L. Narasimha Reddy",
title = "{MultiTrack}: a delay and cost aware {P2P} overlay
architecture",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "69--70",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639590",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borst:2009:SOA,
author = "Sem Borst and Varun Gupta and Anwar Walid",
title = "Self-organizing algorithms for cache cooperation in
content distribution networks",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "71--72",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639591",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rubinstein:2009:SPA,
author = "Benjamin I. P. Rubinstein and Blaine Nelson and Ling
Huang and Anthony D. Joseph and Shing-hon Lau and
Satish Rao and Nina Taft and J. D. Tygar",
title = "Stealthy poisoning attacks on {PCA}-based anomaly
detectors",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "73--74",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639592",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider systems that use PCA-based detectors
obtained from a comprehensive view of the network's
traffic to identify anomalies in backbone networks. To
assess these detectors' susceptibility to adversaries
wishing to evade detection, we present and evaluate
short-term and long-term data poisoning schemes that
trade-off between poisoning duration and the volume of
traffic injected for poisoning. Stealthy Boiling Frog
attacks significantly reduce chaff volume, while only
moderately increasing poisoning duration. ROC curves
provide a comprehensive analysis of PCA-based detection
on contaminated data, and show that even small attacks
can undermine this otherwise successful anomaly
detector.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "adversarial learning; network traffic analysis;
principal components analysis",
}
@Article{Down:2009:SDR,
author = "Douglas G. Down and H. Christian Gromoll and Amber L.
Puha",
title = "State-dependent response times via fluid limits in
shortest remaining processing time queues",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "75--76",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639593",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a single server queue with renewal
arrivals and i.i.d. service times, in which the server
employs the Shortest Remaining Processing Time (SRPT)
policy. We provide a fluid model (or formal law of
large numbers approximation) for this system. The
foremost payoff of our fluid model is a fluid level
approximation for the state-dependent response time of
a job of arbitrary size, that is, the amount of time it
spends in the system, given an arbitrary system
configuration at the time of its arrival.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2009:SPP,
author = "Jianwei Chen and Murali Annavaram and Michel Dubois",
title = "{SlackSim}: a platform for parallel simulations of
{CMPs} on {CMPs}",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "77--78",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639594",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel simulation is a technique to accelerate
microarchitecture simulation of target CMPs by
exploiting the inherent parallelism of host CMPs. In
this paper, we explore the simulation paradigm of
simulating each core of a target CMP in one thread and
spreading the threads across the hardware thread
contexts of a host CMP. We introduce the concept of
slack simulation where the Pthreads simulating
different target cores do not synchronize after each
simulated cycle, but rather they are given some slack.
The slack is the difference in cycles between the
simulated times of any two target cores. Small
slacks, such as a few cycles, greatly improve the
efficiency of parallel CMP simulations, with no or
negligible simulation error. We have developed a
simulation framework called SlackSim to experiment with
various slack simulation schemes. Unlike previous
attempts to parallelize multiprocessor simulations on
distributed memory machines, SlackSim takes advantage
of the efficient sharing of data in the host CMP
architecture. We demonstrate the efficiency and
accuracy of some well-known slack simulation schemes
and of some new ones on SlackSim running on a
state-of-the-art CMP platform.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulati:2009:EAP,
author = "Ajay Gulati and Arif Merchant and Mustafa Uysal and
Pradeep Padala and Peter Varman",
title = "Efficient and adaptive proportional share {I/O}
scheduling",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "79--80",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639595",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2009:DDS,
author = "Yang Liu and Linfeng Zhang and Yong Guan",
title = "A distributed data streaming algorithm for
network-wide traffic anomaly detection",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "81--82",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639596",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Nowadays, Internet has serious security problems and
network failures that are hard to resolve, for example,
botnet attacks, polymorphic worm\slash virus spreading,
DDoS, and flash crowds. To address many of these
problems, we need to have a network-wide view of the
traffic dynamics, and more importantly, be able to
detect traffic anomaly in a timely manner. To our
knowledge, Principal Component Analysis (PCA) is the
best-known spatial detection method for the
network-wide traffic anomaly. However, existing
PCA-based solutions have scalability problems in that
they require $ O(m^2 n) $ running time and $ O(m n) $
space to analyze traffic measurements from $m$
aggregated traffic flows within a sliding window of the
length $n$. We propose a novel data streaming algorithm
for PCA-based network-wide traffic anomaly detection in
a distributed fashion. Our algorithm can archive $ O(w
n \log n)$ running time and $ O(w n)$ space at local
monitors, and $ O(m^2 \log n)$ running time and $ O(m
\log n)$ space at Network Operation Center (NOC), where
$w$ denotes the maximum number of traffic flows at a
local monitor.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baccelli:2009:TMA,
author = "Fran{\c{c}}ois Baccelli and Bruno Kauffmann and Darryl
Veitch",
title = "Towards multihop available bandwidth estimation",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "83--84",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639597",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We evaluate the algorithm proposed in [1], which
estimates the residual bandwidth on each hop of an
Internet path using a parametric model which consists
of a Kelly queueing network. The evaluation is driven
by simulation based on real network traces over a two
node path. Correction factors are proposed and
evaluated to cope with deviations from model
assumptions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nandi:2009:WMU,
author = "Animesh Nandi and Bobby Bhattacharjee and Peter
Druschel",
title = "What a mesh: understanding the design tradeoffs for
streaming multicast",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "85--86",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639598",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cooperative end-system multicast (CEM) is a promising
paradigm for Internet video distribution. Several CEM
systems have been proposed and deployed, but the
tradeoffs inherent in the different designs are not
well understood. In this work, we provide a common
framework in which different CEM design choices can be
empirically and systematically evaluated. Based on our
results, we conjecture that all CEM systems must abide
by a set of fundamental design constraints, which we
express in a simple model. By necessity, existing
system implementations couple the data- and
control-planes and often use different transport
protocols.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2009:MCAb,
author = "Daniel Sadoc Menasch{\'e} and Antonio A. Arag{\~a}o Rocha and
Bin Li and Don Towsley and Arun Venkataramani",
title = "Modeling content availability in peer-to-peer swarming
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "87--88",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639599",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Iyer:2009:VPA,
author = "Ravi Iyer and Ramesh Illikkal and Li Zhao and Don
Newell and Jaideep Moses",
title = "Virtual platform architectures for resource metering
in datacenters",
journal = j-SIGMETRICS,
volume = "37",
number = "2",
pages = "89--90",
month = sep,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1639562.1639600",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:33:11 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With cloud and utility computing models gaining
significant momentum, data centers are increasingly
employing virtualization and consolidation as a means
to support a large number of disparate applications
running simultaneously on a CMP server. In such
environments, it is important to meter the usage of
resources by each datacenter application so that
customers can be charged accordingly. In this paper, we
describe a simple metering and chargeback model
(pay-as-you-go) and describe a solution based on
virtual platform architectures (VPA) to accurately
meter visible as well as transparent resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Cache/Memory; CMP; performance; quality of service;
resource sharing principles; service level agreements",
}
@Article{Kant:2009:CDE,
author = "Krishna Kant",
title = "Challenges in distributed energy adaptive computing",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "3--7",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710117",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Fueled by burgeoning online services, energy
consumption in information technology (IT) equipment is
becoming a major concern from a variety of perspectives
including the continuation of Moore's Law for hardware
design, enabling sophisticated mobile client
functionality, mounting utility costs in data centers,
and increasing CO$_2$ emissions associated with IT
manufacturing, distribution, usage and disposal. This
article discusses an approach where energy consumption
and related issues of heat dissipation and
sustainability are considered as the primary concerns
that drive the way computation and communication is
organized at both clients and servers. This article
describes the challenges in supporting such a
distributed energy adaptive computing paradigm.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pan:2009:GBB,
author = "Xinghao Pan and Jiaqi Tan and Soila Kavulya and Rajeev
Gandhi and Priya Narasimhan",
title = "{Ganesha}: black-box diagnosis of {MapReduce}
systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "8--13",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710118",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ganesha aims to diagnose faults transparently (in a
black-box manner) in MapReduce systems, by analyzing
OS-level metrics. Ganesha's approach is based on
peer-symmetry under fault-free conditions, and can
diagnose faults that manifest asymmetrically at nodes
within a MapReduce system. We evaluate Ganesha by
diagnosing Hadoop problems for the Gridmix Hadoop
benchmark on 10-node and 50-node MapReduce clusters on
Amazon's EC2. We also candidly highlight faults that
escape Ganesha's diagnosis.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anand:2009:NNN,
author = "Ashok Anand and Aditya Akella",
title = "{NetReplay}: a new network primitive",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "14--19",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710119",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we describe Net-Replay, a new network
primitive to help application end points conduct
in-band characterization of the glitches they
encountered. In Net-Replay, each network infrastructure
element remembers a small amount of information for
every packet observed at the element over a certain
time interval. Furthermore, network elements expose a
simple 'packet marking' interface, using which they can
indicate to end-points whether or not they had seen a
particular packet in the past. When application
end-points observe glitches, they replay (i.e.
retransmit) the packets which observed the glitch and
leverage feedback from network elements to determine
the type and location of the glitch encountered by the
packets. We discuss how end-host network stacks should
be modified to leverage Net-Replay in this fashion. We
also consider how network infrastructure can support
Net-Replay in a low-overhead fashion.\par
We argue that Net-Replay can enable applications to
detect a variety of glitches and react to them in an
accurate and informed manner, while ensuring that the
infrastructure stays simple and fast. We believe that
proactive support from the network in the form of
Net-Replay-like functionality is crucial to ensure
robust performance of future Internet applications,
many of which are likely to be highly demanding and far
less tolerant of network glitches than traditional
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Crocey:2009:QBE,
author = "Daniele Croce and Marco Mellia and Emilio
Leonardi",
title = "The quest for bandwidth estimation techniques for
large-scale distributed systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "20--25",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710120",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In recent years the research community has developed
many techniques to estimate the end-to-end available
bandwidth of an Internet path. This important metric
can be potentially exploited to optimize the
performance of several distributed systems and, even,
to improve the effectiveness of the congestion control
mechanism of TCP. Thus, it has been suggested that some
existing estimation techniques could be used for this
purpose. However, existing tools were not designed for
large-scale deployments and were mostly validated in
controlled settings, considering only one measurement
running at a time. In this paper, we argue that current
tools, while offering good estimates when used alone,
might not work in large-scale systems where several
estimations severely interfere with each other. We
analyze the properties of the measurement paradigms
employed today and discuss their functioning, study
their overhead and analyze their interference. Our
testbed results show that current techniques are
insufficient as they are. Finally, we will discuss and
propose some principles that should be taken into
account for including available bandwidth measurements
in large-scale distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keeton:2009:DYK,
author = "Kimberly Keeton and Pankaj Mehra and John Wilkes",
title = "Do you know your {IQ?}: a research agenda for
information quality in systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "26--31",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710121",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Information quality (IQ) is a measure of how fit
information is for a purpose. Sometimes called Quality
of Information (QoI) by analogy with Quality of Service
(QoS), it quantifies whether the correct information is
being used to make a decision or take an action. Not
understanding when information is of adequate quality
can lead to bad decisions and catastrophic effects,
including system outages, increased costs, lost revenue
-- and worse. Quantifying information quality can help
improve decision making, but the ultimate goal should
be to select or construct information producers that
have the appropriate balance between information
quality and the cost of providing it. In this paper, we
provide a brief introduction to the field, argue the
case for applying information quality metrics in the
systems domain, and propose a research agenda to
explore this space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data quality; goal-directed design; information
processing pipeline; information quality; IQ; modeling;
prediction; QoI; uncertainty",
}
@Article{Casale:2009:AGB,
author = "Giuliano Casale and Amir Kalbasi and Diwakar
Krishnamurthy and Jerry Rolia",
title = "Automatically generating bursty benchmarks for
multitier systems",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "32--37",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710122",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Burstiness in resource consumption of requests has
been recently observed to be a fundamental performance
driver for multi-tier applications. This motivates the
need for a methodology to create benchmarks with
controlled burstiness that helps to improve the
effectiveness of system sizing efforts and makes
application testing more comprehensive. We tackle this
problem using a model-based technique for the automatic
and controlled generation of bursty benchmarks.
Phase-type models are constructed in an automated
manner to model the distribution of service demands
placed by user sessions on various system resources.
The models are then used to derive session submission
policies that result in user-specified levels of
service demand burstiness for resources at the
different tiers in a system. A case study using a
three-tier TPC-W testbed shows that our method is able
to control and predict burstiness for session service
demands and to cause dramatic latency and throughput
degradations that are not visible with the same session
mix and no burstiness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hellerstein:2009:ACT,
author = "Joseph L. Hellerstein and Vance Morrison and Eric
Eilebrecht",
title = "Applying control theory in the real world: experience
with building a controller for the {.NET} thread pool",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "38--42",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710123",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "There has been considerable interest in using control
theory to build web servers, database managers, and
other systems. We claim that the potential value of
using control theory cannot be realized in practice
without a methodology that addresses controller design,
testing, and tuning. Based on our experience with
building a controller for the .NET thread pool, we
develop a methodology that: (a) designs for
extensibility to integrate diverse control techniques,
(b) scales the test infrastructure to enable running a
large number of test cases, (c) constructs test cases
for which the ideal controller performance is known a
priori so that the outcomes of test cases can be
readily assessed, and (d) tunes controller parameters
to achieve good results for multiple performance
metrics. We conclude by discussing how our methodology
can be extended, especially to designing controllers
for distributed systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Riska:2009:FRE,
author = "Alma Riska and Ningfang Mi and Evgenia Smirni and
Giuliano Casale",
title = "Feasibility regions: exploiting tradeoffs between
power and performance in disk drives",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "43--48",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710124",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Low utilization immediately suggests that placing the
system into a low power mode during idle times may
considerably decrease power consumption. As future
workload remains largely unknown, 'when' to initiate a
power saving mode and for 'how long' to stay in this
mode remains a challenging open problem, given that
performance degradation of future jobs should not be
compromised. We present a model and an algorithm that
manages to successfully explore feasible regions of
power and performance, and expose the system
limitations according to both measures. Extensive
analysis on a set of enterprise storage traces shows
the algorithm's robustness for successfully identifying
'when' and for 'how long' one should activate a power
saving mode given a set of power/performance targets
that are provided by the user.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Willinger:2009:ROS,
author = "Walter Willinger and Reza Rejaie and Mojtaba Torkjazi
and Masoud Valafar and Mauro Maggioni",
title = "Research on online social networks: time to face the
real challenges",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "49--54",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710125",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Online Social Networks (OSNs) provide a unique
opportunity for researchers to study how a combination
of technological, economical, and social forces have
been conspiring to provide a service that has attracted
the largest user population in the history of the
Internet. With more than half a billion of users and
counting, OSNs have the potential to impact almost
every aspect of networking, including measurement and
performance modeling and analysis, network architecture
and system design, and privacy and user behavior, to
name just a few. However, much of the existing OSN
research literature seems to have lost sight of this
unique opportunity and has avoided dealing with the new
challenges posed by OSNs. We argue in this position
paper that it is high time for OSN researchers to
exploit and face these challenges to provide a basic
understanding of the OSN ecosystem as a whole. Such an
understanding has to reflect the key role users play in
this system and must focus on the system's dynamics,
purpose and functionality when trying to illuminate the
main technological, economic, and social forces at work
in the current OSN revolution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tickoo:2009:MVM,
author = "Omesh Tickoo and Ravi Iyer and Ramesh Illikkal and Don
Newell",
title = "Modeling virtual machine performance: challenges and
approaches",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "55--60",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710126",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data centers are increasingly employing virtualization
and consolidation as a means to support a large number
of disparate applications running simultaneously on
server platforms. However, server platforms are still
being designed and evaluated based on performance
modeling of a single highly parallel application or a
set of homogeneous work-loads running simultaneously.
Since most future datacenters are expected to employ
server virtualization, this paper takes a look at the
challenges of modeling virtual machine (VM) performance
on a datacenter server. Based on vConsolidate (a server
virtualization benchmark) and latest multi-core
servers, we show that the VM modeling challenge
requires addressing three key problems: (a) modeling
the contention of visible resources (cores, memory
capacity, I/O devices, etc), (b) modeling the
contention of invisible resources (shared
microarchitecture resources, shared cache, shared
memory bandwidth, etc) and (c) modeling overheads of
virtual machine monitor (or hypervisor) implementation.
We take a first step to addressing this problem by
describing a VM performance modeling approach and
performing a detailed case study based on the
vConsolidate benchmark. We conclude by outlining
outstanding problems for future work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CMP; consolidation; measurement; modeling; performance
analysis; servers; virtualization",
}
@Article{Gulati:2009:MWD,
author = "Ajay Gulati and Chethan Kumar and Irfan Ahmad",
title = "Modeling workloads and devices for {IO} load balancing
in virtualized environments",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "61--66",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710127",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Virtualization has been effective in providing
performance isolation and proportional allocation of
resources, such as CPU and memory between VMs by using
automated distributed resource schedulers and VM
migration. Storage VMotion allows users to migrate
virtual hard disks from one data store to another
without stopping the virtual machine. There is a dire
need for an automated tool to manage storage resources
more effectively by doing virtual disk placement and
load balancing of workloads across multiple data
stores. Applicable beyond virtualization, this problem
is challenging because it requires modeling both
workloads and characterizing underlying devices.
Furthermore, device characteristics such as number of
disks backing a LUN, disk types etc. are hidden from
the hosts by the virtualization layer at the array. In
this paper, we propose a storage resource scheduler
(SRS) to manage virtual disk placement and automatic
load balancing using Storage VMotion. Our initial
results lead us to believe that we can effectively
model workloads and devices to improve overall storage
resource utilization in practice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fay:2009:WSM,
author = "Damien Fay and Hamed Haddadi and Andrew W. Moore and
Richard Mortier and Steve Uhlig and Almerima
Jamakovic",
title = "A weighted spectrum metric for comparison of
{Internet} topologies",
journal = j-SIGMETRICS,
volume = "37",
number = "3",
pages = "67--72",
month = dec,
year = "2009",
CODEN = "????",
DOI = "https://doi.org/10.1145/1710115.1710129",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:34:40 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Comparison of graph structures is a frequently
encountered problem across a number of problem domains.
Comparing graphs requires a metric to discriminate
which features of the graphs are considered important.
The spectrum of a graph is often claimed to contain all
the information within a graph, but the raw spectrum
contains too much information to be directly used as a
useful metric. In this paper we introduce a metric, the
weighted spectral distribution, that improves on the
raw spectrum by discounting those eigenvalues believed
to be unimportant and emphasizing the contribution of
those believed to be important.\par
We use this metric to optimize the selection of
parameter values for generating Internet topologies.
Our metric leads to parameter choices that appear
sensible given prior knowledge of the problem domain:
the resulting choices are close to the default values
of the topology generators and, in the case of some
generators, fall within the expected region. This
metric provides a means for meaningfully optimizing
parameter selection when generating topologies intended
to share structure with, but not match exactly,
measured graphs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Illikkal:2010:PQP,
author = "Ramesh Illikkal and Vineet Chadha and Andrew Herdrich
and Ravi Iyer and Donald Newell",
title = "{PIRATE}: {QoS} and performance management in {CMP}
architectures",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "3--10",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773396",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As new multi-threaded usage models such as
virtualization and consolidation take advantage of
multiple cores in CMP architectures, the impact of
shared resource contention between VMs and user-level
applications introduces Quality of Service(QoS)
concerns and challenges. QoS-aware management of these
shared platform resources is therefore becoming
increasingly important. Various QoS schemes for
resource management have been recently proposed, but
most of these prior efforts have been focused on
controlling individual resource allocation based on
priority information passed down from the OS or
Hypervisor to system resources. The complexity of this
approach increases when multiple levels of resources
are associated with an application's performance and
power consumption. In this paper we employ simpler
rate-based QoS mechanisms which control the execution
rate of competing applications. To enable
differentiation between simultaneously running
applications' performance and power consumption, these
rate mechanisms need to dynamically adjust the
execution of application. Our proposed PI-RATE
architecture introduces a control-theoretic approach to
dynamically adjust the execution rate of each
application based on the QoS target and monitored
resource utilization. We evaluate three modes of
PI-RATE architecture --- cache QoS targets, performance
QoS targets and power QoS targets --- to show that the
PI-RATE architecture is flexible and effective at
enabling QoS in a CMP platform.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clock modulation; frequency scaling; integral
controller; proportional",
}
@Article{Dube:2010:PLL,
author = "Parijat Dube and Li Zhang and David Daly and Alan
Bivens",
title = "Performance of large low-associativity caches",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "11--18",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "While it is known that lowering the associativity of
caches degrades cache performance, little is understood
about the degree of this effect or how to lessen the
effect, especially in very large caches. Most existing
works on cache performance are simulation or emulation
based and there is a lack of analytical models
characterizing performance in terms of different
configuration parameters such as line size, cache size,
associativity and workload specific parameters. We
develop analytical models to study performance of large
cache architectures by capturing the dependence of miss
ratio on associativity and other configuration
parameters. While high associativity may decrease cache
misses, for very large caches the corresponding
increase in hardware cost and power may be significant.
We use our models as well as simulation to study
different proposals for reducing misses in low
associativity caches, specifically, address space
randomization and victim caches. Our analysis provides
specific detail on the impact of these proposals, and a
clearer understanding of why they do or do not work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "address randomization; associativity; modeling; victim
cache",
}
@Article{Zhu:2010:ROW,
author = "Yaping Zhu and Jennifer Rexford and Subhabrata Sen and
Aman Shaikh",
title = "{Route Oracle}: where have all the packets gone?",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "19--25",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Many network-management problems in large backbone
networks need the answer to a seemingly simple
question: where does a given IP packet, entering the
network at a particular place and time, leave the
network to continue on its path to the destination?
Answering this question at scale and in real time is
challenging for several reasons: (i) a destination IP
address could match several IP prefixes, (ii) the
longest-matching prefix may change over time, (iii) the
number of IP prefixes and routing protocol messages is
very large, and (iv) network-management applications
often require answers to this question for a large
number of destination IP addresses in real time. In
this paper, we present an efficient algorithm for
tracking prefix-match changes for ranges of IP
addresses. We then present the design, implementation,
and evaluation of the Route Oracle tool that answers
queries about routing changes on behalf of network
management applications. Our design of Route Oracle
includes several performance optimizations, such as
pre-processing of BGP update messages, and
parallelization of query processing. Experiments with
BGP measurement data from a large ISP backbone
demonstrate that our system answers queries in real
time and at scale.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Doebel:2010:TVP,
author = "Bjoern Doebel and Peter Nobel and Eno Thereska and
Alice Zheng",
title = "Towards versatile performance models for complex,
popular applications",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "26--33",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773399",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Perhaps surprisingly, no practical performance models
exist for popular (and complex) client applications
such as Adobe's Designer suite, Microsoft's Office
suite and Visual Studio, Mozilla, Halo 3, etc. There is
currently no tool that automatically answers program
developers', IT administrators' and end-users' simple
what-if questions like 'what happens to the performance
of my favorite application X if I upgrade from Windows
Vista to Windows 7?'. This paper describes directions
we are taking for constructing practical, versatile
performance models to address this problem.\par
The directions we have taken have two paths. The first
path involves instrumenting applications better to
export their state and associated metrics. This
application-specific monitoring is always on and
interesting data is collected from real, 'in-the-wild'
deployments. The second path involves statistical
modeling techniques. The models we are experimenting
with require no modifications to the OS or applications
beyond the above instrumentation, and no explicit {\em
a priori\/} model on how an OS or application should
behave. We are in the process of learning from models
we have constructed for several Microsoft products,
including the Office suite, Visual Studio and Media
Player. This paper presents preliminary findings from a
large user deployment (several hundred thousand user
sessions) of these applications that show the coverage
and limitations of such models.\par
Early indications from this work point towards future
modeling strategies based on large amounts of data
collected in the field. We present our thoughts on what
this could imply for the SIGMETRICS community.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mishra:2010:TCC,
author = "Asit K. Mishra and Joseph L. Hellerstein and Walfredo
Cirne and Chita R. Das",
title = "Towards characterizing cloud backend workloads:
insights from {Google} compute clusters",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "34--41",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The advent of cloud computing promises highly
available, efficient, and flexible computing services
for applications such as web search, email, voice over
IP, and web search alerts. Our experience at Google is
that realizing the promises of cloud computing requires
an extremely scalable backend consisting of many large
compute clusters that are shared by application tasks
with diverse service level requirements for throughput,
latency, and jitter. These considerations impact (a)
capacity planning to determine which machine resources
must grow and by how much and (b) task scheduling to
achieve high machine utilization and to meet service
level objectives.\par
Both capacity planning and task scheduling require a
good understanding of task resource consumption (e.g.,
CPU and memory usage). This in turn demands simple and
accurate approaches to workload
classification---determining how to form groups of tasks
(workloads) with similar resource demands. One approach
to workload classification is to make each task its own
workload. However, this approach scales poorly since
tens of thousands of tasks execute daily on Google
compute clusters. Another approach to workload
classification is to view all tasks as belonging to a
single workload. Unfortunately, applying such a
coarse-grain workload classification to the diversity
of tasks running on Google compute clusters results in
large variances in predicted resource
consumptions.\par
This paper describes an approach to workload
classification and its application to the Google Cloud
Backend, arguably the largest cloud backend on the
planet. Our methodology for workload classification
consists of: (1) identifying the workload dimensions;
(2) constructing task classes using an off-the-shelf
algorithm such as k-means; (3) determining the break
points for qualitative coordinates within the workload
dimensions; and (4) merging adjacent task classes to
reduce the number of workloads. We use the foregoing,
especially the notion of qualitative coordinates, to
glean several insights about the Google Cloud Backend:
(a) the duration of task executions is bimodal in that
tasks either have a short duration or a long duration;
(b) most tasks have short durations; and (c) most
resources are consumed by a few tasks with long
duration that have large demands for CPU and memory.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2010:SIQ,
author = "Martin Arlitt and Keith Farkas and Subu Iyer and
Preethi Kumaresan and Sandro Rafaeli",
title = "Systematically improving the quality of {IT}
utilization data",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "42--49",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Efforts to reduce the cost of ownership for enterprise
IT environments are spurring the development and
deployment of data-driven management tools. Yet, IT
data is imperfect and these imperfections can lead to
inappropriate decisions that have significant technical
and business consequences. In this paper, we begin by
raising awareness of this problem through examples of
the imperfections that occur, and a discussion of their
causes and implications on IT management tasks. We then
introduce a systematic approach for addressing such
imperfections. Our approach allows best practices to be
readily shared, simplifies the construction of IT data
assurance solutions, and allows context-specific
corrections to be applied until the root cause(s) of
the imperfections can be fixed. To demonstrate the
value of our solution, we describe a capacity planning
use case. Application of our solution to an ongoing
capacity planning effort reduced the (human) planner's
time requirements by $\approx$3x to $\approx$6 hours,
while enabling him to evaluate the data quality of
$\approx$5x more applications and for 9 imperfection
types rather than 1.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hu:2010:PMI,
author = "Jianying Hu and Yingdong Lu and Aleksandra
Mojsilovi{\'c} and Mayank Sharma and Mark S.
Squillante",
title = "Performance management of {IT} services delivery",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "50--57",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2010:BPI,
author = "Shuyi Chen and Kaustubh R. Joshi and Matti A. Hiltunen
and Richard D. Schlichting and William H. Sanders",
title = "Blackbox prediction of the impact of {DVFS} on
end-to-end performance of multitier systems",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "59--63",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic voltage and frequency scaling (DVFS) is a
well-known technique for gaining energy savings on
desktop and laptop computers. However, its use in
server settings requires careful consideration of any
potential impacts on end-to-end service performance of
hosted applications. In this paper, we develop a simple
metric called the `frequency gradient' that allows
prediction of the impact of changes in processor
frequency on the end-to-end transaction response times
of multitier applications. We show how frequency
gradients can be measured on a running system in a
push-button manner without any prior knowledge of
application semantics, structure, or configuration
settings. Using experimental results, we demonstrate
that the frequency gradients provide accurate
predictions, and enable end-to-end performance-aware
DVFS for multitier applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marwah:2010:QSI,
author = "Manish Marwah and Paulo Maciel and Amip Shah and
Ratnesh Sharma and Tom Christian and Virgilio Almeida
and Carlos Ara{\'u}jo and Erica Souza and Gustavo
Callou and Bruno Silva and S{\'e}rgio Galdino and Jose
Pires",
title = "Quantifying the sustainability impact of data center
availability",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "64--68",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Data center availability is critical considering the
explosive growth in Internet services and people's
dependence on them. Furthermore, in recent years,
sustainability has become important. However, data
center designers have little information on the
sustainability impact of data center availability
architectures. In this paper, we present an approach to
estimate the sustainability impact of such
architectures. Availability is computed using
Stochastic Petri Net (SPN) models while an energy-based
lifecycle assessment (LCA) approach is used for
quantifying sustainability impact. The approach is
demonstrated on real life data center power
infrastructure architectures. Five different
architectures are considered and initial results show
that quantification of sustainability impact provides
important information to a data center designer in
evaluating availability architecture choices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "availability; data center; life-cycle assessment;
power infrastructure; stochastic Petri net;
sustainability",
}
@Article{Marsan:2010:EEM,
author = "Marco Ajmone Marsan and Michela Meo",
title = "Energy efficient management of two cellular access
networks",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "69--73",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we evaluate the energy saving that can
be achieved with the energy-aware cooperative
management of the cellular access networks of two
operators offering service over the same area. We
evaluate the amount of energy that can be saved by
using both networks in high traffic conditions, but
switching off one of the two during the periods when
traffic is so low that the desired quality of service
can be obtained with just one network. When one of the
two networks is off, its customers are allowed to roam
over the one that is on. Several alternatives are
studied, as regards the switch-off pattern: the one
that balances the switch-off frequencies, the one that
balances roaming costs, the one that balances energy
savings, and the one that maximizes the amount of saved
energy. Our results indicate that a huge amount of
energy can be saved, and suggest that, to reduce energy
consumption, new cooperative attitudes of the operators
should be encouraged with appropriate incentives, or
even enforced by regulation authorities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsiaflakis:2010:FGD,
author = "Paschalis Tsiaflakis and Yung Yi and Mung Chiang and
Marc Moonen",
title = "Fair greening for {DSL} broadband access",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "74--78",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Given that broadband access networks are an integral
part of the ICT infrastructure and that DSL is the most
widely deployed broadband access technology, greening
DSL has become important. Our recent work demonstrated
a promising tradeoff between data rate performance and
energy conservation. However, more greening still
implies possibly lower data rate, and allocating this
'price of greening' across interfering users needs to
be fair. This paper proposes four formulations of fair
greening in interference-limited networks, unifies them
into one general representation, and develops a unified
algorithm to solve them effectively. Simulations
quantify the intuitions on fairness in greening DSL, as
these four alternative approaches offer a range of
choices between maintaining a high sum data rate and
enforcing various definitions of fairness. Fairness of
allocating the price of greening is also interesting in
its own right.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ord:2010:PEM,
author = "Jason Ord and Ellen Chappell and Scott Canonico and
Tim Strecker",
title = "Product environmental metrics for printers",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "79--83",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Hewlett--Packard's Imaging \& Printing Group (IPG)
is charting a course towards environmental leadership
in its markets. To do this, IPG must look beyond just
satisfying the regulations and identify opportunities
for groundbreaking improvement. Carefully designed
metrics are necessary to guide design, chart progress
and set goals in this effort. IPG's Environmental
Strategy Team is leading an initiative to establish
these metrics internally. This paper describes the
development process the authors followed to construct
the initial metrics, which are focused on the 'carbon
footprint' of products under development. The paper
also discusses the lessons learned developing the
initial metrics, the results achieved thus far,
implementation details, challenges, and future
opportunities for improvement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "carbon footprint; environmental performance
measurement; environmental product metrics; printers;
printing",
}
@Article{Cayzer:2010:SHI,
author = "Steve Cayzer and Chris Preist",
title = "The sustainability hub: an information management tool
for analysis and decision making",
journal = j-SIGMETRICS,
volume = "37",
number = "4",
pages = "84--88",
month = mar,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1773394.1773409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:13 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sustainability is becoming an increasingly important
driver for which decision makers -- consumers,
corporate and government -- rely on principled,
accurate and provenanced metrics to make appropriate
behavior changes. Our assertion here is that a
Sustainability Hub which manages such metrics together
with their context and chains of reasoning will be of
great benefit to the global community. In this paper we
explain the Hub vision and explain its triple value
proposition of context, chains of reasoning and
community. We propose a data model and describe our
existing prototype.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "chains of reasoning; community; context; information
management; metrics; provenance; sustainability",
}
@Article{Thereska:2010:PPM,
author = "Eno Thereska and Bjoern Doebel and Alice X. Zheng and
Peter Nobel",
title = "Practical performance models for complex, popular
applications",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "1--12",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811041",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Perhaps surprisingly, no practical performance models
exist for popular (and complex) client applications
such as Adobe's Creative Suite, Microsoft's Office and
Visual Studio, Mozilla, Halo 3, etc. There is currently
no tool that automatically answers program developers',
IT administrators' and end-users' simple what-if
questions like 'what happens to the performance of my
favorite application X if I upgrade from Windows Vista
to Windows 7?'. This paper describes our approach
towards constructing practical, versatile performance
models to address this problem. The goal is to have
these models be useful for application developers to
help expand application testing coverage and for IT
administrators to assist with understanding the
performance consequences of a software, hardware or
configuration change.\par
This paper's main contributions are in system building
and performance modeling. We believe we have built
applications that are easier to model because we have
proactively instrumented them to export their state and
associated metrics. This application-specific
monitoring is always on and interesting data is
collected from real, 'in-the-wild' deployments. The
models we are experimenting with are based on
statistical techniques. They require no modifications
to the OS or applications beyond the above
instrumentation, and no explicit a priori model on how
an OS or application should behave. We are in the
process of learning from models we have constructed for
several Microsoft products, including the Office suite,
Visual Studio and Media Player. This paper presents
preliminary findings from a large user deployment
(several hundred thousand user sessions) of these
applications that show the coverage and limitations of
such models. These findings pushed us to move beyond
averages/means and go into some depth into why client
application performance has an inherently large
variance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "developers; IT administrators; performance variance;
what-if",
}
@Article{Gast:2010:MFM,
author = "Nicolas Gast and Bruno Gaujal",
title = "A mean field model of work stealing in large-scale
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "13--24",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811042",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider a generic model of
computational grids, seen as several clusters of
homogeneous processors. In such systems, a key issue
when designing efficient job allocation policies is to
balance the workload over the different
resources.\par
We present a Markovian model for performance evaluation
of such a policy, namely work stealing (idle processors
steal work from others) in large-scale heterogeneous
systems. Using mean field theory, we show that when the
size of the system grows, it converges to a system of
deterministic ordinary differential equations that
allows one to compute the expectation of performance
functions (such as average response times) as well as
the distributions of these functions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "grid computing; load balancing; mean field",
}
@Article{Balsamo:2010:UAP,
author = "Simonetta Balsamo and Peter G. Harrison and Andrea
Marin",
title = "A unifying approach to product-forms in networks with
finite capacity constraints",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "25--36",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811043",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In queueing networks with blocking, stations wishing
to transmit customers to a full queue are blocked and
need to take alternative action on completing a
service. In general, product-forms, i.e. separable
solutions for such a network's equilibrium state
probabilities, do not exist but some product-forms have
been obtained over the years in special cases, using a
variety of techniques. We show that the Reversed
Compound Agent Theorem (RCAT) can obtain these diverse
results in a uniform way by its direct application, so
unifying product-forms in networks with and without
blocking. New product-forms are also constructed for a
type of blocking we call `skipping', where a blocked
station sends its output-customers to the queue after
the one causing the blocking in that customer's path.
Finally, we investigate a novel congestion management
scheme for networks of finite-capacity queues in which
a station with a full queue transmits signals that
delete customers from upstream queues in order to
reduce incoming traffic.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "product-form solutions; queueing theory",
}
@Article{Andrew:2010:OFR,
author = "Lachlan L. H. Andrew and Minghong Lin and Adam
Wierman",
title = "Optimality, fairness, and robustness in speed scaling
designs",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "37--48",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811044",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work examines fundamental tradeoffs incurred by a
speed scaler seeking to minimize the sum of expected
response time and energy use per job. We prove that a
popular speed scaler is 2-competitive for this
objective and no 'natural' speed scaler can do better.
Additionally, we prove that energy-proportional speed
scaling works well for both Shortest Remaining
Processing Time (SRPT) and Processor Sharing (PS) and
we show that under both SRPT and PS, gated-static speed
scaling is nearly optimal when the mean workload is
known, but that dynamic speed scaling provides
robustness against uncertain workloads. Finally, we
prove that speed scaling magnifies unfairness under
SRPT but that PS remains fair under speed scaling.
These results show that these speed scalers can achieve
any two, but only two, of optimality, fairness, and
robustness.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "energy; fairness; PS; robustness; scheduling; speed
scaling; SRPT",
}
@Article{Dong:2010:EEE,
author = "Wei Dong and Yunhao Liu and Xiaofan Wu and Lin Gu and
Chun Chen",
title = "{Elon}: enabling efficient and long-term reprogramming
for wireless sensor networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "49--60",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811046",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a new mechanism called Elon for enabling
efficient and long-term reprogramming in wireless
sensor networks. Elon reduces the transferred code size
significantly by introducing the concept of replaceable
component. It avoids the cost of hardware reboot with a
novel software reboot mechanism. Moreover, it
significantly prolongs the reprogramming lifetime by
avoiding flash writes for TelosB nodes. Experimental
results show that Elon transfers up to 120--389 times
less information than Deluge, and 18--42 times less
information than Stream. The software reboot mechanism
that Elon applies reduces the rebooting cost by
50.4\%--53.87\% in terms of beacon packets, and 56.83\%
in terms of unsynchronized nodes. In addition, Elon
prolongs the reprogramming lifetime by a factor of
2.3.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "component; reboot; reprogramming; wireless sensor
network",
}
@Article{Karbasi:2010:DSN,
author = "Amin Karbasi and Sewoong Oh",
title = "Distributed sensor network localization from local
connectivity: performance analysis for the
{HOP-TERRAIN} algorithm",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "61--70",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811047",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses the problem of determining the
node locations in ad-hoc sensor networks when only
connectivity information is available. In previous
work, we showed that the localization algorithm MDS-MAP
proposed by Y. Shang et al. is able to localize sensors
up to a bounded error decreasing at a rate inversely
proportional to the radio range r. The main limitation
of MDS-MAP is the assumption that the available
connectivity information is processed in a centralized
way.\par
In this work we investigate a practically important
question whether similar performance guarantees can be
obtained in a distributed setting. In particular, we
analyze the performance of the HOP-TERRAIN algorithm
proposed by C. Savarese et al. This algorithm can be
seen as a distributed version of the MDS-MAP algorithm.
More precisely, assume that the radio range r=o(1) and
that the network consists of n sensors positioned
randomly on a d-dimensional unit cube and d+1 anchors
in general positions. We show that when only
connectivity information is available, for every
unknown node i, the Euclidean distance between the
estimate $\hat{x}_i$ and the correct position $x_i$ is
bounded by $\Vert \hat{x}_i - x_i \Vert$",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed; localization; sensor network",
}
@Article{Xu:2010:SSP,
author = "Kuang Xu and Olivier Dousse and Patrick Thiran",
title = "Self-synchronizing properties of {CSMA} wireless
multi-hop networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "71--82",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811048",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We show that CSMA is able to spontaneously synchronize
transmissions in a wireless network with constant-size
packets, and that this property can be used to devise
efficient synchronized CSMA scheduling mechanisms
without message passing. Using tools from queuing
theory, we prove that for any connected wireless
networks with arbitrary interference constraints, it is
possible to implement self-synchronizing TDMA schedules
without any explicit message passing or clock
synchronization besides transmitting the original data
packets, and the interaction can be fully local in that
each node decides when to transmit next only by
overhearing its neighbors' transmissions. We also
provide a necessary and sufficient condition on the
emergence of self-synchronization for a given TDMA
schedule, and prove that such conditions for
self-synchronization can be checked in a finite number
of steps for a finite network topology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "scheduling algorithm; self-synchronization; stochastic
recursive sequence",
}
@Article{Moallemi:2010:FLD,
author = "Ciamac Moallemi and Devavrat Shah",
title = "On the flow-level dynamics of a packet-switched
network",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "83--94",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811050",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The packet is the fundamental unit of transportation
in modern communication networks such as the Internet.
Physical layer scheduling decisions are made at the
level of packets, and packet-level models with
exogenous arrival processes have long been employed to
study network performance, as well as design scheduling
policies that more efficiently utilize network
resources. On the other hand, a user of the network is
more concerned with end-to-end bandwidth, which is
allocated through congestion control policies such as
TCP. Utility-based flow-level models have played an
important role in understanding congestion control
protocols. In summary, these two classes of models have
provided separate insights for flow-level and
packet-level dynamics of a network. In this paper, we
wish to study these two dynamics together. We propose a
joint flow-level and packet-level stochastic model for
the dynamics of a network, and an associated policy for
congestion control and packet scheduling that is based
on alpha-weighted policies from the literature. We
provide a fluid analysis for the model that establishes
the throughput optimality of the proposed policy, thus
validating prior insights based on separate
packet-level and flow-level models. By analyzing a
critically scaled fluid model under the proposed
policy, we provide constant factor performance bounds
on the delay performance and characterize the invariant
states of the system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; flow-level model; maximum weight;
packet-level model; scheduling; utility maximization",
}
@Article{Godfrey:2010:ICD,
author = "P. Brighten Godfrey and Michael Schapira and Aviv
Zohar and Scott Shenker",
title = "Incentive compatibility and dynamics of congestion
control",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "95--106",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811051",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies under what conditions congestion
control schemes can be both efficient, so that capacity
is not wasted, and incentive compatible, so that each
participant can maximize its utility by following the
prescribed protocol. We show that both conditions can
be achieved if routers run strict priority queueing
(SPQ) or weighted fair queueing (WFQ) and end-hosts run
any of a family of protocols which we call Probing
Increase Educated Decrease (PIED). A natural question
is whether incentive compatibility and efficiency are
possible while avoiding the per-flow processing of WFQ.
We partially address that question in the negative by
showing that any policy satisfying a certain 'locality'
condition cannot guarantee both properties.\par
Our results also have implication for convergence to
some steady-state throughput for the flows. Even when
senders transmit at a fixed rate (as in a UDP flow
which does not react to congestion), feedback effects
among the routers can result in complex dynamics which
do not appear in the simple topologies studied in past
work.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion control; incentives; queueing; TCP",
}
@Article{Shah:2010:DCG,
author = "Devavrat Shah and Jinwoo Shin",
title = "Dynamics in congestion games",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "107--118",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811052",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Game theoretic modeling and equilibrium analysis of
congestion games have provided insights in the
performance of Internet congestion control, road
transportation networks, etc. Despite the long history,
very little is known about their transient (non
equilibrium) performance. In this paper, we are
motivated to seek answers to questions such as how long
does it take to reach equilibrium, when the system does
operate near equilibrium in the presence of dynamics,
e.g. nodes join or leave, or the tradeoff between
performance and the rate of dynamics. In this pursuit,
we provide three contributions in this paper. First, a
novel probabilistic model to capture realistic
behaviors of agents allowing for the possibility of
arbitrariness in conjunction with rationality. Second,
evaluation of (a) time to converge to equilibrium under
this behavior model and (b) distance to Nash
equilibrium. Finally, determination of tradeoff between
the rate of dynamics and quality of performance
(distance to equilibrium) which leads to an interesting
uncertainty principle. The novel technical ingredients
involve analysis of logarithmic Sobolov constant of
Markov process with time varying state space and
methodically this should be of broader interest in the
context of dynamical systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "congestion game; logarithmic Sobolov constant;
logit-response",
}
@Article{Xiang:2010:ORS,
author = "Liping Xiang and Yinlong Xu and John C. S. Lui and
Qian Chang",
title = "Optimal recovery of single disk failure in {RDP} code
storage systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "119--130",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811054",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Modern storage systems use thousands of inexpensive
disks to meet the storage requirement of applications.
To enhance the data availability, some form of
redundancy is used. For example, conventional RAID-5
systems provide data availability for single disk
failure only, while recent advanced coding techniques
such as row-diagonal parity (RDP) can provide data
availability with up to two disk failures. To reduce
the probability of data unavailability, whenever a
single disk fails, disk recovery (or rebuild) will be
carried out. We show that conventional recovery scheme
of RDP code for a single disk failure is inefficient
and suboptimal. In this paper, we propose an optimal
and efficient disk recovery scheme, Row-Diagonal
Optimal Recovery (RDOR), for single disk failure of RDP
code that has the following properties: (1) it is read
optimal in the sense that it issues the smallest number
of disk reads to recover the failed disk; (2) it has
the load balancing property that all surviving disks
will be subjected to the same amount of additional
workload in rebuilding the failed disk. We carefully
explore the design state space and theoretically show
the optimality of RDOR. We carry out performance
evaluation to quantify the merits of RDOR on some
widely used disks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "disk failure; raid recovery; RDP code; recovery
algorithm",
}
@Article{Ghanbari:2010:QLR,
author = "Saeed Ghanbari and Gokul Soundararajan and Cristiana
Amza",
title = "A query language and runtime tool for evaluating
behavior of multi-tier servers",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "131--142",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811055",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "As modern multi-tier systems are becoming increasingly
large and complex, it becomes more difficult for system
analysts to understand the overall behavior of the
system, and diagnose performance problems. To assist
analysts inspect performance behavior, we introduce
SelfTalk, a novel declarative language that allows
analysts to query and understand the status of a large
scale system. SelfTalk is sufficiently expressive to
encode an analyst's high-level hypotheses about system
invariants, normal correlations between system metrics,
or other a priori derived performance models, such as,
'I expect that the throughputs of interconnected system
components are linearly correlated'. Given a
hypothesis, Dena, our runtime support system,
instantiates and validates it using actual monitoring
data within specific system configurations. We evaluate
SelfTalk/Dena by posing several hypotheses about system
behavior and querying Dena to validate system behavior
in a multi-tier dynamic content server. We find that
Dena automatically validates the system performance
based on the pre-existing hypotheses and helps to
diagnose system misbehavior.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "expectation; hypothesis; management; performance
models",
}
@Article{Goel:2010:SSQ,
author = "Ashish Goel and Pankaj Gupta",
title = "Small subset queries and bloom filters using ternary
associative memories, with applications",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "143--154",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811056",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Associative memories offer high levels of parallelism
in matching a query against stored entries. We design
and analyze an architecture which uses {\em single\/}
lookup into a Ternary Content Addressable Memory (TCAM)
to solve the subset query problem for small sets, i.e.,
to check whether a given set (the query) contains (or
alternately, is contained in) any one of a large
collection of sets in a database. We use each TCAM
entry as a small Ternary Bloom Filter (each 'bit' of
which is one of {0,1,wildcard}) to store one of the
sets in the collection. Like Bloom filters, our
architecture is susceptible to false positives. Since
each TCAM entry is quite small, asymptotic analyses of
Bloom filters do not directly apply. Surprisingly, we
are able to show that the asymptotic false positive
probability formula can be safely used if we penalize
the small Bloom filter by taking away just one bit of
storage and adding just half an extra set element
before applying the formula. We believe that this
analysis is independently interesting. The subset query
problem has applications in databases, network
intrusion detection, packet classification in Internet
routers, and Information Retrieval. We demonstrate our
architecture on one illustrative streaming application
-- intrusion detection in network traffic. By shingling
(i.e., taking consecutive bytes of) the strings in the
database, we can perform a single subset query and
hence a single TCAM search, to skip many bytes in the
stream. We evaluate our scheme on the open source CLAM
anti-virus database, for {\em worst-case\/} as well as
random streams. Our architecture appears to be at least
one order of magnitude faster than previous approaches.
Since the individual Bloom filters must fit in a single
TCAM entry (currently 72 to 576 bits), our solution
applies only when each set is of a small cardinality.
However, this is sufficient for many typical
applications. Also, recent algorithms for the
subset-query problem use a small-set version as a
subroutine.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bloom filters; subset queries; TCAM",
}
@Article{Laadan:2010:TLA,
author = "Oren Laadan and Nicolas Viennot and Jason Nieh",
title = "Transparent, lightweight application execution replay
on commodity multiprocessor operating systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "155--166",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811057",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present Scribe, the first system to provide
transparent, low-overhead application record-replay and
the ability to go live from replayed execution. Scribe
introduces new lightweight operating system mechanisms,
rendezvous and sync points, to efficiently record
nondeterministic interactions such as related system
calls, signals, and shared memory accesses. Rendezvous
points make a partial ordering of execution based on
system call dependencies sufficient for replay,
avoiding the recording overhead of maintaining an exact
execution ordering. Sync points convert asynchronous
interactions that can occur at arbitrary times into
synchronous events that are much easier to record and
replay.\par
We have implemented Scribe without changing, relinking,
or recompiling applications, libraries, or operating
system kernels, and without any specialized hardware
support such as hardware performance counters. It works
on commodity Linux operating systems, and commodity
multi-core and multiprocessor hardware. Our results
show for the first time that an operating system
mechanism can correctly and transparently record and
replay multi-process and multi-threaded applications on
commodity multiprocessors. Scribe recording overhead is
less than 2.5\% for server applications including
Apache and MySQL, and less than 15\% for desktop
applications including Firefox, Acrobat, OpenOffice,
parallel kernel compilation, and movie playback.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "debugging; fault-tolerance; record-replay;
virtualization",
}
@Article{Ni:2010:CSP,
author = "Jian Ni and R. Srikant and Xinzhou Wu",
title = "Coloring spatial point processes with applications to
peer discovery in large wireless networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "167--178",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811059",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study distributed channel assignment
in wireless networks with applications to peer
discovery in ad hoc wireless networks. We model channel
assignment as a coloring problem for spatial point
processes in which n nodes are located in a unit cube
uniformly at random and each node is assigned one of K
colors, where each color represents a channel. The
objective is to maximize the spatial separation between
nodes of the same color. In general, it is hard to
derive the optimal coloring algorithm and therefore, we
consider a natural greedy coloring algorithm, first
proposed in [5]. We prove two key results: (i) with
just a small number of colors when K is roughly of the
order of log(n) loglog(n), the distance separation
achieved by the greedy coloring algorithm
asymptotically matches the optimal distance separation
that can be achieved by an algorithm which is allowed
to select the locations of the nodes but is allowed to
use only one color, and (ii) when K = Omega(log(n)),
the greedy coloring algorithm asymptotically achieves
the best distance separation that can be achieved by an
algorithm which is allowed to both optimally color and
place nodes. The greedy coloring algorithm is also
shown to dramatically outperform a simple random
coloring algorithm. Moreover, the results continue to
hold under node mobilities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "channel assignment; coloring algorithms; spatial point
processes; wireless networks",
}
@Article{vandeVen:2010:OTB,
author = "Peter M. van de Ven and Augustus J. E. M. Janssen and
Johan S. H. van Leeuwaarden",
title = "Optimal tradeoff between exposed and hidden nodes in
large wireless networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "179--190",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless networks equipped with the CSMA protocol are
subject to collisions due to interference. For a given
interference range we investigate the tradeoff between
collisions (hidden nodes) and unused capacity (exposed
nodes). We show that the sensing range that maximizes
throughput critically depends on the activation rate of
nodes. For infinite line networks, we prove the
existence of a threshold: When the activation rate is
below this threshold the optimal sensing range is small
(to maximize spatial reuse). When the activation rate
is above the threshold the optimal sensing range is
just large enough to preclude all collisions.
Simulations suggest that this threshold policy extends
to more complex linear and non-linear topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "carrier-sensing range; exposed nodes; hidden nodes;
Markov processes; multi-access; throughput; wireless
networks",
}
@Article{Liu:2010:SMW,
author = "Shihuan Liu and Lei Ying and R. Srikant",
title = "Scheduling in multichannel wireless networks with
flow-level dynamics",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "191--202",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies scheduling in multichannel wireless
networks with flow-level dynamics. We consider a
downlink network with a single base station, M channels
(frequency bands), and multiple mobile users (flows).
We also assume mobiles dynamically join the network to
receive finite-size files and leave after downloading
the complete files. A recent study [16] has shown that
the MaxWeight algorithm fails to be throughput-optimal
under this flow-level dynamics. The main contribution
of this paper is the development of joint
channel-assignment and workload-based scheduling
algorithms for multichannel downlink networks with
dynamic flow arrivals/departures. We prove that these
algorithms are throughput-optimal. Our simulations
further demonstrate that a hybrid channel-assignment
and workload-based scheduling algorithm significantly
improves the network performance (in terms of both
file-transfer delay and blocking probability) compared
to the existing algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "flow-level dynamics; multichannel downlink network;
wireless scheduling",
}
@Article{Shah:2010:DSC,
author = "Devavrat Shah and Tauhid Zaman",
title = "Detecting sources of computer viruses in networks:
theory and experiment",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "203--214",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We provide a systematic study of the problem of
finding the source of a computer virus in a network. We
model virus spreading in a network with a variant of
the popular SIR model and then construct an estimator
for the virus source. This estimator is based upon a
novel combinatorial quantity which we term rumor
centrality. We establish that this is an ML estimator
for a class of graphs. We find the following surprising
threshold phenomenon: on trees which grow faster than a
line, the estimator always has non-trivial detection
probability, whereas on trees that grow like a line,
the detection probability will go to 0 as the network
grows. Simulations performed on synthetic networks such
as the popular small-world and scale-free networks, and
on real networks such as an Internet AS network and the
U.S. electric power grid network, show that the
estimator either finds the source exactly or within a
few hops in different network topologies. We compare
rumor centrality to another common network centrality
notion known as distance centrality. We prove that on
trees, the rumor center and distance center are
equivalent, but on general networks, they may differ.
Indeed, simulations show that rumor centrality
outperforms distance centrality in finding virus
sources in networks which are not tree-like.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "epidemics; estimation",
}
@Article{Misra:2010:IPA,
author = "Vishal Misra and Stratis Ioannidis and Augustin
Chaintreau and Laurent Massouli{\'e}",
title = "Incentivizing peer-assisted services: a fluid
{Shapley} value approach",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "215--226",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A new generation of content delivery networks for live
streaming, video on demand, and software updates takes
advantage of a peer-to-peer architecture to reduce
their operating cost. In contrast with previous
uncoordinated peer-to-peer schemes, users opt-in to
dedicate part of the resources they own to help the
content delivery, in exchange for receiving the same
service at a reduced price. Such incentive mechanisms
are appealing, as they simplify coordination and
accounting. However, they also increase a user's
expectation that she will receive a fair price for the
resources she provides. Addressing this issue carefully
is critical in ensuring that all interested
parties--including the provider--are willing to
participate in such a system, thereby guaranteeing its
stability.\par
In this paper, we take a cooperative game theory
approach to identify the ideal incentive structure that
follows the axioms formulated by Lloyd Shapley. This
ensures that each player, be it the provider or a peer,
receives an amount proportional to its contribution and
bargaining power when entering the game. In general,
the drawback of this ideal incentive structure is its
computational complexity. However, we prove that as the
number of peers receiving the service becomes large,
the Shapley value received by each player approaches a
fluid limit. This limit follows a simple closed form
expression and can be computed in several scenarios of
interest: by applying our technique, we show that
several peer-assisted services, deployed on both wired
and wireless networks, can benefit from important cost
and energy savings with a proper incentive structure
that follows simple compensation rules.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cooperative game theory; incentive mechanisms",
}
@Article{Ma:2010:LPM,
author = "Yadi Ma and Suman Banerjee and Shan Lu and Cristian
Estan",
title = "Leveraging parallelism for multi-dimensional packet
classification on software routers",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "227--238",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811065",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a software-based solution to the
multi-dimensional packet classification problem which
can operate at high line speeds, e.g., in excess of 10
Gbps, using high-end multi-core desktop platforms
available today. Our solution, called Storm, leverages
a common notion that a subset of rules are likely to be
popular over short durations of time. By identifying a
suitable set of popular rules one can significantly
speed up existing software-based classification
algorithms. A key aspect of our design is in
partitioning processor resources into various relevant
tasks, such as continuously computing the popular rules
based on a sampled subset of traffic, fast
classification for traffic that matches popular rules,
dealing with packets that do not match the most popular
rules, and traffic sampling. Our results show that by
using a single 8-core Xeon processor desktop platform,
it is possible to sustain classification rates of more
than 15 Gbps for representative rule sets of size in
excess of 5-dimensional 9000 rules, with no packet
losses. This performance is significantly superior to a
8-way implementation of a state-of-the-art packet
classification software system running on the same
8-core machine. Therefore, we believe that our design
of packet classification functions can be a useful
classification building block for RouteBricks-style
designs, where a core router might be constructed as a
mesh of regular desktop machines.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "packet classification; parallelism; storm",
}
@Article{Shah:2010:QPW,
author = "Devavrat Shah and John N. Tsitsiklis and Yuan Zhong",
title = "Qualitative properties of $ \alpha $-weighted
scheduling policies",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "239--250",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811067",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a switched network, a fairly general
constrained queueing network model that has been used
successfully to model the detailed packet-level
dynamics in communication networks, such as
input-queued switches and wireless networks. The main
operational issue in this model is that of deciding
which queues to serve, subject to certain
constraints.\par
In this paper, we study qualitative performance
properties of the well known $ \alpha $-weighted
scheduling policies. The stability, in the sense of
positive recurrence, of these policies has been well
understood. We establish exponential upper bounds on
the tail of the steady-state distribution of the
backlog.\par
Along the way, we prove finiteness of the expected
steady-state backlog when $ \alpha < 1$, a property
that was known only for $ \alpha \geq 1$.\par
Finally, we analyze the excursions of the maximum
backlog over a finite time horizon for $ \alpha $ $
\geq $ 1. As a consequence, for $ \alpha $ $ \geq $ 1,
we establish the full state space collapse property.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "exponential bound; Markov chain; maximum weight-alpha;
state space collapse; switched network",
}
@Article{Casale:2010:CMD,
author = "Giuliano Casale and Ningfang Mi and Evgenia Smirni",
title = "{CWS}: a model-driven scheduling policy for correlated
workloads",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "251--262",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811068",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We define CWS, a non-preemptive scheduling policy for
workloads with correlated job sizes. CWS tackles the
scheduling problem by inferring the expected sizes of
upcoming jobs based on the structure of correlations
and on the outcome of past scheduling decisions. Size
prediction is achieved using a class of Hidden Markov
Models (HMM) with continuous observation densities that
describe job sizes. We show how the forward-backward
algorithm of HMMs applies effectively in scheduling
applications and how it can be used to derive
closed-form expressions for size prediction. This is
particularly simple to implement in the case of
observation densities that are phase-type (PH-type)
distributed, where existing fitting methods for
Markovian point processes may also simplify the
parameterization of the HMM workload model.\par
Based on the job size predictions, CWS emulates
size-based policies which favor short jobs, with
accuracy depending mainly on the HMM used to
parametrize the scheduling algorithm. Extensive
simulation and analysis illustrate that CWS is
competitive with policies that assume exact information
about the workload.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "correlated workload; model-driven scheduling; response
time; stochastic scheduling",
}
@Article{Zheng:2010:RAU,
author = "Haoqiang Zheng and Jason Nieh",
title = "{RSIO}: automatic user interaction detection and
scheduling",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "263--274",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811069",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present RSIO, a processor scheduling framework for
improving the response time of latency-sensitive
applications by monitoring accesses to I/O channels and
inferring when user interactions occur. RSIO
automatically identifies processes involved in a user
interaction and boosts their priorities at the time the
interaction occurs to improve system response time.
RSIO also detects processes indirectly involved in
processing an interaction, automatically accounting for
dependencies and boosting their priorities accordingly.
RSIO works with existing schedulers and requires no
application modifications to identify periods of
latency-sensitive application activity. We have
implemented RSIO in Linux and measured its
effectiveness on microbenchmarks and real applications.
Our results show that RSIO is easy to use and can
provide substantial improvements in system performance
for latency-sensitive applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dependencies; interactive applications; scheduling",
}
@Article{Bramson:2010:RLB,
author = "Maury Bramson and Yi Lu and Balaji Prabhakar",
title = "Randomized load balancing with general service time
distributions",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "275--286",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811071",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Randomized load balancing greatly improves the sharing
of resources in a number of applications while being
simple to implement. One model that has been
extensively used to study randomized load balancing
schemes is the supermarket model. In this model, jobs
arrive according to a rate-$ n \lambda $ Poisson process at
a bank of n rate-1 exponential server queues. A notable
result, due to Vvedenskaya {\em et al.\/} (1996),
showed that when each arriving job is assigned to the
shortest of d $ \geq $ 2 randomly chosen queues, the
equilibrium queue sizes decay doubly exponentially in
the limit as $ n \to \infty $. This is a substantial
improvement over the case d=1, where queue sizes decay
exponentially.\par
The method of analysis used in the above paper and in
the subsequent literature applies to jobs with
exponential service time distributions and does not
easily generalize. It is desirable to study load
balancing models with more general, especially
heavy-tailed, service time distributions since such
service times occur widely in practice.\par
This paper describes a modularized program for treating
randomized load balancing problems with general service
time distributions and service disciplines. The program
relies on an {\em ansatz\/} which asserts that any
finite set of queues in a randomized load balancing
scheme becomes independent as $ n \to \infty $. This
allows one to derive queue size distributions and other
performance measures of interest. We establish the {\em
ansatz\/} when the service discipline is FIFO and the
service time distribution has a decreasing hazard rate
(this includes heavy-tailed service times). Assuming
the {\em ansatz}, we also obtain the following results:
(i) as $ n \to \infty $, the process of job arrivals at
any fixed queue tends to a Poisson process whose rate
depends on the size of the queue, (ii) when the service
discipline at each server is processor sharing or LIFO
with preemptive resume, the distribution of the number
of jobs is insensitive to the service distribution, and
(iii) the tail behavior of the queue-size distribution
in terms of the service distribution for the FIFO
service discipline.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "asymptotic independence; load balancing; randomized
algorithms",
}
@Article{Ganesh:2010:LBR,
author = "Ayalvadi Ganesh and Sarah Lilienthal and D. Manjunath
and Alexandre Proutiere and Florian Simatos",
title = "Load balancing via random local search in closed and
open systems",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "287--298",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811072",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we analyze the performance of random
{\em load resampling and migration\/} strategies in
parallel server systems. Clients initially attach to an
arbitrary server, but may switch servers independently
at random instants of time in an attempt to improve
their service rate. This approach to load balancing
contrasts with traditional approaches where clients
make smart server selections upon arrival (e.g.,
Join-the-Shortest-Queue policy and variants thereof).
Load resampling is particularly relevant in scenarios
where clients cannot predict the load of a server
before being actually attached to it. An important
example is in wireless spectrum sharing where clients
try to share a set of frequency bands in a distributed
manner.\par
We first analyze the natural {\em Random Local Search
(RLS)\/} strategy. Under this strategy, after sampling
a new server randomly, clients only switch to it if
their service rate is improved. In closed systems,
where the client population is fixed, we derive tight
estimates of the time it takes under RLS strategy to
balance the load across servers. We then study open
systems where clients arrive according to a random
process and leave the system upon service completion.
In this scenario, we analyze how client migrations
within the system interact with the system dynamics
induced by client arrivals and departures. We compare
the load-aware RLS strategy to a load-oblivious
strategy in which clients just randomly switch server
without accounting for the server loads. Surprisingly,
we show that both load-oblivious and load-aware
strategies stabilize the system whenever this is at all
possible. We further demonstrate, using large-system
asymptotics, that the average client sojourn time under
the load-oblivious strategy is not considerably reduced
when clients apply smarter load-aware strategies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mean field asymptotics; stability analysis",
}
@Article{Zhao:2010:UMF,
author = "Haiquan (Chuck) Zhao and Cathy H. Xia and Zhen Liu and
Don Towsley",
title = "A unified modeling framework for distributed resource
allocation of general fork and join processing
networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "299--310",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811073",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper addresses the problem of distributed
resource allocation in general fork and join processing
networks. The problem is motivated by the complicated
processing requirements arising from distributed data
intensive computing. In such applications, the
underlying data processing software consists of a rich
set of semantics that include synchronous and
asynchronous data fork and data join. The different
types of semantics and processing requirements
introduce complex interdependence between various data
flows within the network.\par
We study the distributed resource allocation problem in
such systems with the goal of achieving the maximum
total utility of output streams. Past research has
dealt with networks with specific types of fork/join
semantics, but none of them included all four types. We
propose a novel modeling framework that can represent
all combinations of fork and join semantics, and
formulate the resource allocation problem as a convex
optimization problem on this model. We propose a
shadow-queue based decentralized iterative algorithm to
solve the resource allocation problem. We show that the
algorithm guarantees optimality and demonstrate through
simulation that it can adapt quickly to dynamically
changing environments.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithm; fork and join networks;
resource allocation",
}
@Article{Ioannidis:2010:DCH,
author = "Stratis Ioannidis and Laurent Massouli{\'e} and Augustin
Chaintreau",
title = "Distributed caching over heterogeneous mobile
networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "311--322",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811075",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sharing content over a mobile network through
opportunistic contacts has recently received
considerable attention.\par
In proposed scenarios, users store content they
download in a local cache and share it with other users
they meet, e.g., via Bluetooth or WiFi. The storage
capacity of mobile devices is typically limited;
therefore, identifying which content a user should
store in her cache is a fundamental problem in the
operation of any such content distribution
system.\par
In this work, we propose Psephos, a novel mechanism for
determining the caching policy of each mobile user.
Psephos is fully distributed: users compute their own
policies individually, in the absence of a central
authority. Moreover, it is designed for a heterogeneous
environment, in which demand for content, access to
resources, and mobility characteristics may vary across
different users. Most importantly, the caching policies
computed by our mechanism are optimal: we rigorously
show that Psephos maximizes the system's social
welfare. Our results are derived formally using
techniques from stochastic approximation and convex
optimization; to the best of our knowledge, our work is
the first to address caching with heterogeneity in a
fully distributed manner.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "caching; content distribution; heterogeneity;
opportunistic networks",
}
@Article{Antunes:2010:AFI,
author = "Nelson Antunes and Gon{\c{c}}alo Jacinto and
Ant{\'o}nio Pacheco",
title = "An analytical framework to infer multihop path
reliability in {MANETs}",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "323--332",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811076",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to complexity and intractability reasons, most of
the analytical studies on the reliability of
communication paths in mobile ad hoc networks are based
on the assumption of link independence. In this paper,
an analytical framework is developed to characterize
the random behavior of a multihop path and derive path
metrics to characterize the reliability of paths. This
is achieved through the modeling of a multihop path as
a PDMP (piecewise deterministic Markov process). Two
path based metrics are obtained as expectations of
functionals of the process: the mean path duration and
the path persistence. We show that these metrics are
the unique solution of a set of integro-differential
equations and provide a recursive scheme for their
computation. Finally, numerical results illustrate the
computation of the metrics; these results are compared
with independent link approximation results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "mobile ad hoc networks; mobility; multihop path
reliability; piecewise deterministic Markov processes;
random walk",
}
@Article{Coffman:2010:CFD,
author = "Ed Coffman and Philippe Robert and Florian Simatos and
Shuzo Tarumi and Gil Zussman",
title = "Channel fragmentation in dynamic spectrum access
systems: a theoretical study",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "333--344",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811039.1811077",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dynamic Spectrum Access systems exploit temporarily
available spectrum ('white spaces') and can spread
transmissions over a number of non-contiguous
sub-channels. Such methods are highly beneficial in
terms of spectrum utilization. However, excessive
fragmentation degrades performance and hence off-sets
the benefits. Thus, there is a need to study these
processes so as to determine how to ensure acceptable
levels of fragmentation. Hence, we present experimental
and analytical results derived from a mathematical
model. We model a system operating at capacity serving
requests for bandwidth by assigning a collection of
gaps (sub-channels) with no limitations on the fragment
size. Our main theoretical result shows that even if
fragments can be arbitrarily small, the system does not
degrade with time. Namely, the average total number of
fragments remains bounded. Within the very difficult
class of dynamic fragmentation models (including models
of storage fragmentation), this result appears to be
the first of its kind. Extensive experimental results
describe behavior, at times unexpected, of
fragmentation under different algorithms. Our model
also applies to dynamic linked-list storage allocation,
and provides a novel analysis in that domain. We prove
that, interestingly, the 50\% rule of the classical
(non-fragmented) allocation model carries over to our
model. Overall, the paper provides insights into the
potential behavior of practical fragmentation
algorithms.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "cognitive radio; dynamic spectrum access; ergodicity
of Markov chains; fragmentation",
}
@Article{Bermond:2010:DSA,
author = "Jean-Claude Bermond and Dorian Mazauric and Vishal
Misra and Philippe Nain",
title = "A distributed scheduling algorithm for wireless
networks with constant overhead and arbitrary binary
interference",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "345--346",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed algorithm; interference; stability;
transmission scheduling; wireless network",
}
@Article{Sagnol:2010:SOD,
author = "Guillaume Sagnol and Mustapha Bouhtou and St{\'e}phane
Gaubert",
title = "Successive $c$-optimal designs: a scalable technique
to optimize the measurements on large networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "347--348",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811080",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We propose a new approach to optimize the deployment
and the sampling rates of network monitoring tools,
such as Netflow, on a large IP network. It reduces to
solving a stochastic sequence of Second Order Cone
Programs. We validate our approach with experiments
relying on real data from a commercial network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "c-optimality; netflow; optimal experimental design;
SOCP",
}
@Article{Cuevas:2010:DDB,
author = "Rub{\'e}n Cuevas and Nikolaos Laoutaris and Xiaoyuan
Yang and Georgos Siganos and Pablo Rodriguez",
title = "Deep diving into {BitTorrent} locality",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "349--350",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A substantial amount of work has recently gone into
localizing BitTorrent traffic within an ISP in order to
avoid excessive and often times unnecessary transit
costs. In this work we aim to answer yet unanswered
questions such as: what is the minimum and the maximum
transit traffic reduction across hundreds of ISPs?,
what are the win-win boundaries for ISPs and their
users?, what is the maximum amount of transit traffic
that can be localized without requiring fine-grained
control of inter-AS overlay connections?, what is the
impact to transit traffic from upgrades of residential
broadband speeds?.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "BitTorrent; locality; measurements",
}
@Article{Jin:2010:IAN,
author = "Yu Jin and Nick Duffield and Patrick Haffner and
Subhabrata Sen and Zhi-Li Zhang",
title = "Inferring applications at the network layer using
collective traffic statistics",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "351--352",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we propose a novel technique for
inferring the distribution of application classes
present in the aggregated traffic flows between
endpoints, which exploits both the statistics of the
traffic flows, and the spatial distribution of those
flows across the network. Our method employs a two-step
supervised model, where the bootstrapping step provides
initial (inaccurate) inference on the traffic
application classes, and the graph-based calibration
step adjusts the initial inference through the
collective spatial traffic distribution. In evaluations
using real traffic flow measurements from a large ISP,
we show how our method can accurately classify
application types within aggregate traffic between
endpoints, even without the knowledge of ports and
other traffic features. While the bootstrap estimate
classifies the aggregates with 80\% accuracy,
incorporating spatial distributions through calibration
increases the accuracy to 92\%, i.e., roughly halving
the number of errors.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "application identification; collective traffic
statistics; graph-based calibration; two-step model",
}
@Article{Anselmi:2010:PAP,
author = "Jonatha Anselmi and Bruno Gaujal",
title = "The price of anarchy in parallel queues revisited",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "353--354",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811083",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a network of parallel, non-observable
queues and analyze the Price of Anarchy (PoA) from the
new point of view where the router has the memory of
previous dispatching choices. In the regime where the
demands grow with the network size, we provide an upper
bound on the PoA by means of convex programming. To
study the impact of non-Bernoulli routers, we introduce
the Price of Forgetting (PoF) and prove that it is
bounded from above by two.\par
Numerical experiments show that the benefit of having
memory in the router is independent of the network size
and heterogeneity, and monotonically depends on the
network load only.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "convex programming; parallel queues; price of anarchy;
price of forgetting",
}
@Article{Khouzani:2010:OPS,
author = "M. H. R. Khouzani and Saswati Sarkar and Eitan
Altman",
title = "Optimal propagation of security patches in mobile
wireless networks: extended abstract",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "355--356",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811084",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Reliable security measures against outbreaks of
malware is imperative to enable large scale
proliferation of wireless technologies. Immunization
and healing of the nodes through dissemination of
security patches can counter the spread of a malware
upon an epidemic outbreak. The distribution of patches
however burdens the bandwidth which is scarce in
wireless networks. The trade-offs between security
risks and resource consumption can be attained by
activating at any given time only fractions of
dispatchers and dynamically selecting their packet
transmission rates. We formulate the above trade-offs
as an optimal control problem that seek to minimize the
aggregate network costs that depend on security risks
and resource consumed by the countermeasures. Using
Pontryagin's maximum principle, we prove that the
dynamic control strategies have simple structures. When
the resource consumption cost is concave, optimal
strategy is to use maximum resources for distribution
of patches until a threshold time, upon which, the
patching should halt. When the resource consumption
cost is convex, the above transition is strict but
continuous.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "dynamic patching; optimal control;
security-performance trade-off",
}
@Article{Le:2010:MCE,
author = "Kien Le and Ozlem Bilgir and Ricardo Bianchini and
Margaret Martonosi and Thu D. Nguyen",
title = "Managing the cost, energy consumption, and carbon
footprint of {Internet} services",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "357--358",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811085",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The large amount of energy consumed by Internet
services represents significant and fast-growing
financial and environmental costs. This paper
introduces a general, optimization-based framework and
several request distribution policies that enable
multi-data-center services to manage their brown energy
consumption and leverage green energy, while respecting
their service-level agreements (SLAs) and minimizing
energy cost. Our policies can be used to abide by caps
on brown energy consumption that might arise from
various scenarios such as government imposed
Kyoto-style carbon limits. Extensive simulations and
real experiments show that our policies allow a service
to trade off consumption and cost. For example, using
our policies, a service can reduce brown energy
consumption by 24\% for only a 10\% increase in cost,
while still abiding by SLAs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "data center; energy cap; optimization; renewable
energy; request distribution",
}
@Article{Mishra:2010:CPM,
author = "Asit K. Mishra and Shekhar Srikantaiah and Mahmut
Kandemir and Chita R. Das",
title = "Coordinated power management of voltage islands in
{CMPs}",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "359--360",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811086",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multiple clock domain architectures have recently been
proposed to alleviate the power problem in CMPs by
having different frequency/voltage values assigned to
each domain based on workload requirements. However,
accurate allocation of power to these voltage/frequency
islands based on time varying workload characteristics
as well as controlling the power consumption at the
provisioned power level is non-trivial. Toward this
end, we propose a two-tier feedback-based control
theoretic solution. Our first-tier consists of a global
power manager that allocates power targets to
individual islands based on the workload dynamics. The
power consumptions of these islands are in turn
controlled by a second-tier, consisting of local
controllers that regulate island power using dynamic
voltage and frequency scaling in response to workload
requirements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "chip multiprocessors (CMP); control theory; DVFS;
GALS",
}
@Article{Nguyen:2010:RSA,
author = "Hung X. Nguyen and Matthew Roughan",
title = "Rigorous statistical analysis of {Internet} loss
measurements",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "361--362",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811087",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present a rigorous technique for
estimating confidence intervals of packet loss
measurements. Our approach is motivated by simple
observations that the loss process can be modelled as
an alternating renewal process. We use this structure
to build a Hidden Semi-Markov Model (HSMM) for the
measurement process, and from this estimate both loss
rates, and their confidence intervals. We use both
simulations and a set of more than 18000 hours of real
Internet measurements (between dedicated measurement
hosts, PlanetLab hosts, web and DNS servers) to
cross-validate our estimates, and show that they are
significantly more accurate than any current
alternative.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "accuracy; loss rate; performance measurement",
}
@Article{Osogami:2010:SOT,
author = "Takayuki Osogami and Rudy Raymond",
title = "Semidefinite optimization for transient analysis of
queues",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "363--364",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811088",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We derive an upper bound on the tail distribution of
the transient waiting time for the GI/GI/1 queue from a
formulation of semidefinite programming (SDP). Our
upper bounds are expressed in closed forms using the
first two moments of the service time and the
interarrival time. The upper bounds on the tail
distributions are integrated to obtain the upper bounds
on the corresponding expectations. We also extend the
formulation of the SDP, using the higher moments of the
service time and the interarrival time, and calculate
upper bounds and lower bounds numerically.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "bounds; duality; g/g/1 queue; moments; occupation
measure; semidefinite programming; transient",
}
@Article{Park:2010:CCF,
author = "Dongchul Park and Biplob Debnath and David Du",
title = "{CFTL}: a convertible flash translation layer adaptive
to data access patterns",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "365--366",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811089",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The flash translation layer (FTL) is a
software/hardware interface inside NAND flash memory.
Since FTL has a critical impact on the performance of
NAND flash-based devices, a variety of FTL schemes have
been proposed to improve their performance. In this
paper, we propose a novel hybrid FTL scheme named
Convertible Flash Translation Layer (CFTL). Unlike
other existing FTLs using static address mapping
schemes, CFTL is adaptive to data access patterns so
that it can dynamically switch its mapping scheme to
either a read-optimized or a write-optimized mapping
scheme. In addition to this convertible scheme, we
propose an efficient caching strategy to further
improve the CFTL performance with only a simple hint.
Consequently, both the convertible feature and the
caching strategy empower CFTL to achieve good read
performance as well as good write performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "CFTL; flash memory; flash translation layer; FTL",
}
@Article{Qian:2010:CUL,
author = "Feng Qian and Abhinav Pathak and Yu Charlie Hu and
Zhuoqing Morley Mao and Yinglian Xie",
title = "A case for unsupervised-learning-based spam
filtering",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "367--368",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811090",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "latent semantics analysis (LSA); spam campaign;
SpamCampaignAssassin (SCA); unsupervised learning",
}
@Article{Rajagopalan:2010:DAD,
author = "Shreevatsa Rajagopalan and Devavrat Shah",
title = "Distributed averaging in dynamic networks",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "369--370",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811091",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Distributed averaging is a well-studied problem, and
often a 'prototype' for a class of fundamental
questions arising in various disciplines. Previous work
has considered the effect of dynamics in the network
topology, in terms of changes in which communication
links are present. Here, we analyze the other forms of
dynamics, namely: changes in the values at the nodes,
and nodes joining or leaving the network.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "distributed averaging; distributed networks; dynamics;
message-passing",
}
@Article{Sarikaya:2010:PBP,
author = "Ruhi Sarikaya and Canturk Isci and Alper
Buyuktosunoglu",
title = "Program behavior prediction using a statistical metric
model",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "371--372",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811092",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Adaptive computing systems rely on predictions of
program behavior to understand and respond to the
dynamically varying application characteristics. This
study describes an accurate statistical workload metric
modeling scheme for predicting program phases. Our
evaluations demonstrate the superior performance of
this predictor over existing predictors on a wide range
of benchmarks. This prediction accuracy lends itself to
improved power-performance trade-offs when applied to
dynamic power management.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "computer architecture; monitoring and forecasting;
system performance measurement; workload
characterization",
}
@Article{Shah:2010:DOQ,
author = "Devavrat Shah and Jinwoo Shin",
title = "Delay optimal queue-based {CSMA}",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "373--374",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811093",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the past year or so, an exciting progress has led
to throughput optimal design of CSMA-based algorithms
for wireless networks. However, such an algorithm
suffers from very poor delay performance. A recent work
suggests that it is impossible to design a CSMA-like
simple algorithm that is throughput optimal and induces
low delay for any wireless network. However, wireless
networks arising in practice are formed by nodes
placed, possibly arbitrarily, in some geographic
area.\par
In this paper, we propose a CSMA algorithm with
per-node average-delay bounded by a constant,
independent of the network size, when the network has
geometry (precisely, polynomial growth structure) that
is present in {\em any\/} practical wireless network.
Two novel features of our algorithm, crucial for its
performance, are (a) choice of access probabilities as
an appropriate function of queue-sizes, and (b) use of
local network topological structures. Essentially, our
algorithm is a queue-based CSMA with a minor difference
that at each time instance a very small fraction of
{\em frozen\/} nodes do not execute CSMA. Somewhat
surprisingly, appropriate selection of such frozen
nodes, in a distributed manner, lead to the delay
optimal performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "Aloha; Markov chain; mixing time; wireless
multi-access",
}
@Article{Shye:2010:CMU,
author = "Alex Shye and Benjamin Scholbrock and Gokhan Memik and
Peter A. Dinda",
title = "Characterizing and modeling user activity on
smartphones: summary",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "375--376",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811094",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we present a comprehensive analysis of
real smartphone usage during a 6-month study of real
user activity on the Android G1 smartphone. Our goal is
to study the high-level characteristics of smartphone
usage, and to understand the implications on optimizing
smartphones, and their networks. Overall, we present 11
findings that cover general usage behavior, interaction
with the battery, power consumption, network activity,
frequently-run applications, and modeling usage
states.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "embedded systems; human factors",
}
@Article{Silveira:2010:DTA,
author = "Fernando Silveira and Christophe Diot and Nina Taft
and Ramesh Govindan",
title = "Detecting traffic anomalies using an equilibrium
property",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "377--378",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811095",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When many flows are multiplexed on a non-saturated
link, their volume changes over short timescales tend
to cancel each other out, making the average change
across flows close to zero. This equilibrium property
holds if the flows are nearly independent, and it is
violated by traffic changes caused by several
correlated flows. We exploit this empirical property to
design a computationally simple anomaly detection
method.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "anomaly detection; statistical test",
}
@Article{Soundararajan:2010:CSE,
author = "Niranjan Soundararajan and Anand Sivasubramaniam and
Vijay Narayanan",
title = "Characterizing the soft error vulnerability of
multicores running multithreaded applications",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "379--380",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811096",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multicores have become the platform of choice across
all market segments. Cost-effective protection against
soft errors is important in these environments, due to
the need to move to lower technology generations and
the exploding number of transistors on a chip. While
multicores offer the flexibility of varying the number
of application threads and the number of cores on which
they run, the reliability impact of choosing one
configuration over another is unclear. Our study
reveals that the reliability costs vary dramatically
between configurations and being unaware could lead to
a sub-optimal choice.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "fit rate; multicore; soft errors",
}
@Article{Tan:2010:CMM,
author = "Jian Tan and Wei Wei and Bo Jiang and Ness Shroff and
Don Towsley",
title = "Can multipath mitigate power law delays?: effects of
parallelism on tail performance",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "381--382",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811097",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "multipath; parallelism; power law; redundant
transmission; split transmission",
}
@Article{Tomozei:2010:DUP,
author = "Dan-Cristian Tomozei and Laurent Massouli{\'e}",
title = "Distributed user profiling via spectral methods",
journal = j-SIGMETRICS,
volume = "38",
number = "1",
pages = "383--384",
month = jun,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1811099.1811098",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Aug 25 07:35:52 MDT 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "User profiling is a useful primitive for constructing
personalized services, such as content recommendation.
In the present work we investigate the feasibility of
user profiling in a distributed setting, with no
central authority and only local information exchanges
between users. Our main contributions are: (i) We
propose a spectral clustering technique, and prove its
ability to recover unknown user profiles with only few
measures of affinity between users. (ii) We develop
distributed algorithms which achieve an embedding of
users into a low-dimensional space, based on spectral
transformation. These involve simple message passing
among users, and provably converge to the desired
embedding.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
keywords = "clustering; distributed spectral embedding; gossip",
}
@Article{George:2010:AAC,
author = "David K. George and Cathy H. Xia",
title = "Asymptotic analysis of closed queueing networks and
its implications to achievable service levels",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "3--5",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870180",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Buaic:2010:SBM,
author = "Ana Bu{\v s}i{\'c} and Varun Gupta and Jean Mairesse",
title = "Stability of the bipartite matching model",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "6--8",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870181",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tizghadam:2010:RWD,
author = "Ali Tizghadam and Alberto Leon-Garcia",
title = "On random walks in direction-aware network problems",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "9--11",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870182",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2010:ART,
author = "Minghong Lin and Adam Wierman and Bert Zwart",
title = "The average response time in a heavy-traffic {SRPT}
queue",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "12--14",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870183",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sigman:2010:HTL,
author = "Karl Sigman and Ward Whitt",
title = "Heavy-traffic limits for nearly deterministic queues",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "15--17",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870184",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ye:2010:DLT,
author = "Heng-Qing Ye and David D. Yao",
title = "Diffusion limit of a two-class network: stationary
distributions and interchange of limits",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "18--20",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870185",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nino-Mora:2010:IPA,
author = "Jos{\'e} Ni{\~n}o-Mora",
title = "Index policies for admission and routing of soft
real-time traffic to parallel queues",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "21--23",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870186",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Robert:2010:MFA,
author = "Philippe Robert and Jim Roberts",
title = "A mean field approximation for the capacity of
server-limited, gate-limited multi-server polling
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "24--26",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870187",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2010:FAL,
author = "Yunan Liu and Ward Whitt",
title = "A fluid approximation for large-scale service
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "27--29",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870188",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2010:MFL,
author = "Nicolas Gast and Bruno Gaujal",
title = "Mean field limit of non-smooth systems and
differential inclusions",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "30--32",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870189",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Radovanovic:2010:RMT,
author = "Ana Radovanovi{\'c} and Assaf Zeevi",
title = "Revenue maximization through ``smart'' inventory
management in reservation-based online advertising",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "33--35",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870190",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cho:2010:VFP,
author = "Jeong-woo Cho and Jean-Yves {Le Boudec} and Yuming
Jiang",
title = "On the validity of the fixed point equation and
decoupling assumption for analyzing the {802.11 MAC}
protocol",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "36--38",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870191",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance evaluation of the 802.11 MAC protocol is
classically based on the decoupling assumption, which
hypothesizes that the backoff processes at different
nodes are independent. A necessary condition for the
validity of this approach is the existence and
uniqueness of a solution to a fixed point equation.
However, it was also recently pointed out that this
condition is not sufficient; in contrast, a necessary
and sufficient condition is a global stability property
of the associated ordinary differential equation. Such
a property was established only for a specific case,
namely for a homogeneous system (all nodes have the
same parameters) and when the number of backoff stages
is either 1 or infinite and with other restrictive
conditions. In this paper, we give a simple condition
that establishes the validity of the decoupling
assumption for the homogeneous case. We also discuss
the heterogeneous and the differentiated service cases
and show that the uniqueness condition is not
sufficient; we exhibit one case where the fixed point
equation has a unique solution but the decoupling
assumption is not valid.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{vandeVen:2010:ETR,
author = "P. M. van de Ven and S. C. Borst and D. Denteneer and
A. J. E. M. Janssen and J. S. H. van Leeuwaarden",
title = "Equalizing throughputs in random-access networks",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "39--41",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870192",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marot:2010:RCP,
author = "Michel Marot and Vincent Gauthier",
title = "Reducing collision probability on a shared medium
using a variational method",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "42--44",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870193",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lu:2010:AMM,
author = "Yingdong Lu and Mark S. Squillante",
title = "On approximations for multiple multidimensional
stochastic knapsacks",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "45--47",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870194",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2010:DRM,
author = "Anshul Gandhi and Mor Harchol-Balter and Ivo Adan",
title = "Decomposition results for an {M/M/k} with staggered
setup",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "48--50",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870195",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we consider an M/M/k queueing system
with setup costs. Servers are turned off when there is
no work to do, but turning on an off server incurs a
setup cost. The setup cost takes the form of a time
delay and a power penalty. Setup costs are common in
manufacturing systems, data centers and disk farms,
where idle servers are turned off to save on operating
costs. Since servers in setup mode consume a lot of
power, the number of servers that can be in setup at
any time is often limited. In the staggered setup
model, at most one server can be in setup at any time.
While recent literature has analyzed an M/M/k system
with staggered setup and exponentially distributed
setup times, no closed-form solutions were obtained. We
provide the first analytical closed-form expressions
for the limiting distribution of the system states, the
distribution of response times, and the mean power
consumption for the above system. In particular, we
prove the following decomposition property: the
response time for an M/M/k system with staggered setup
is equal, in distribution, to the sum of response time
for an M/M/k system without setup, and the setup
time.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pal:2010:EIS,
author = "Ranjan Pal and Leana Golubchik",
title = "On the economics of information security: the problem
of designing optimal cyber-insurance contracts",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "51--53",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870196",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dube:2010:RDC,
author = "Parijat Dube and Li Zhang",
title = "Resiliency of distributed clock synchronization
networks",
journal = j-SIGMETRICS,
volume = "38",
number = "2",
pages = "54--56",
month = sep,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1870178.1870197",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Tue Nov 23 12:59:22 MST 2010",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Clock synchronization refers to techniques and
protocols used to maintain mutually consistent
time-of-day clocks in a coordinated network of
computers. A (clock) synchronization network is an
interconnection of computers to implement a particular
clock synchronization solution. To prevent
clock-dependency loops, most synchronization networks
use a stratified approach which is essentially a tree
structure with a Primary Reference Clock (at
``stratum-0''). A node at stratum-$ i + 1 $ exchanges
synchronization messages with its parent node at
stratum-$i$ and also with some other nodes at the same
or other level. The purpose of this redundancy is two
fold: (i) to calculate smoother steering rate
adjustment, (ii) to maintain connectivity in the event
of a failure. We provide an analytical framework to
evaluate the performance of different approaches for
resilient synchronization networks. To evaluate
resiliency of synchronization networks, we characterize
failure recovery metrics like connectivity and failure
detection delay in terms of parameters related to
network topology and failure recovery solutions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2010:RAD,
author = "Xiaozhou Li and Mark Lillibridge and Mustafa Uysal",
title = "Reliability analysis of deduplicated and erasure-coded
storage",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "4--9",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925021",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kulkarni:2010:TAI,
author = "Milind Kulkarni and Vijay Pai and Derek Schuff",
title = "Towards architecture independent metrics for multicore
performance analysis",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "10--14",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925022",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shepard:2010:LMW,
author = "Clayton Shepard and Ahmad Rahmati and Chad Tossell and
Lin Zhong and Phillip Kortum",
title = "{LiveLab}: measuring wireless networks and smartphone
users in the field",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "15--20",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925023",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hahn:2010:UVL,
author = "Dongwoon Hahn and Ginnah Lee and Brenton Walker and
Matt Beecher and Padma Mundur",
title = "Using virtualization and live migration in a scalable
mobile wireless testbed",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "21--25",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925024",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shakkottai:2010:TCD,
author = "Srinivas Shakkottai and Lei Ying and Sankalp Sah",
title = "Targeted coupon distribution using social networks",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "26--30",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925025",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gopalakrishnan:2010:AVG,
author = "Ragavendran Gopalakrishnan and Jason R. Marden and
Adam Wierman",
title = "An architectural view of game theoretic control",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "31--36",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925026",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yao:2010:DDL,
author = "Zhongmei Yao and Daren B. H. Cline and Dmitri
Loguinov",
title = "In-degree dynamics of large-scale {P2P} systems",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "37--42",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925027",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joumblatt:2010:HAE,
author = "Diana Joumblatt and Renata Teixeira and Jaideep
Chandrashekar and Nina Taft",
title = "{HostView}: annotating end-host performance
measurements with user feedback",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "43--48",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925028",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adhikari:2010:TMR,
author = "Vijay Kumar Adhikari and Sourabh Jain and Zhi-Li
Zhang",
title = "From traffic matrix to routing matrix: {PoP} level
traffic characteristics for a {Tier-1 ISP}",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "49--54",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925029",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2010:SIG,
author = "Martin Arlitt and Niklas Carlsson and Jerry Rolia",
title = "Special issue on the {2010 GreenMetrics workshop}",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "??--??",
month = dec,
year = "2010",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnan:2010:VPM,
author = "Bhavani Krishnan and Hrishikesh Amur and Ada
Gavrilovska and Karsten Schwan",
title = "{VM} power metering: feasibility and challenges",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "56--60",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925031",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Phillips:2010:RAI,
author = "Steven Phillips and Sheryl L. Woodward and Mark D.
Feuer and Peter D. Magill",
title = "A regression approach to infer electricity consumption
of legacy telecom equipment",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "61--65",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925032",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sikdar:2010:EII,
author = "Biplab Sikdar",
title = "Environmental impact of {IEEE 802.11} access points: a
case study",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "66--70",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925033",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Wireless local area networks have become an ubiquitous
means for network access in both residential and
commercial locations over the recent past. Given their
widespread deployment, it is of importance to
understand their environmental impact and this paper
presents a life cycle assessment of the energy
intensity of IEEE 802.11 wireless access points.
Following a cradle-to-grave approach, we evaluate the
energy consumed in the manufacture of access points
(including the extraction of raw materials, component
manufacturing, assembly, and transportation) as well as
during its actual usage. Our results show that the
manufacturing stage is responsible for a significant
fraction of the overall energy consumption. In light of
our findings, increasing the overall lifetime is one of
the recommended ways to reduce the environmental impact
of access points.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{V:2010:NDB,
author = "T. V. Prabhakar and S. N. {Akshay Uttama Nambi} and
H. S. Jamadagni and Krishna Swaroop and R. Venkatesha
Prasad and I. G. M. M. Niemegeers",
title = "A novel {DTN} based energy neutral transfer scheme for
energy harvested {WSN Gateways}",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "71--75",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925034",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lombardo:2010:AES,
author = "Alfio Lombardo and Carla Panarello and Giovanni
Schembra",
title = "Achieving energy savings and {QoS} in {Internet}
access routers",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "76--80",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925035",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bianzino:2010:AAF,
author = "Aruna Prem Bianzino and Anand Kishore Raju and Dario
Rossi",
title = "Apples-to-apples: a framework analysis for
energy-efficiency in networks",
journal = j-SIGMETRICS,
volume = "38",
number = "3",
pages = "81--85",
month = dec,
year = "2010",
CODEN = "????",
DOI = "https://doi.org/10.1145/1925019.1925036",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Wed Jan 12 17:27:21 MST 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Janssen:2011:USD,
author = "Curtis L. Janssen and Helgi Adalsteinsson and Joseph
P. Kenny",
title = "Using simulation to design extreme-scale applications
and architectures: programming model exploration",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "4--8",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964220",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Giles:2011:PAO,
author = "M. B. Giles and G. R. Mudalige and Z. Sharif and G.
Markall and P. H. J. Kelly",
title = "Performance analysis of the {OP2} framework on
many-core architectures",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "9--15",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964221",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Herdman:2011:BMP,
author = "J. A. Herdman and W. P. Gaudin and D. Turland and S.
D. Hammond",
title = "Benchmarking and modelling of {POWER7}, {Westmere},
{BG/P}, and {GPUs}: an industry case study",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "16--22",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964222",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Pennycook:2011:PAH,
author = "S. J. Pennycook and S. D. Hammond and S. A. Jarvis and
G. R. Mudalige",
title = "Performance analysis of a hybrid {MPI\slash CUDA}
implementation of the {NAS-LU} benchmark",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "23--29",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964223",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Budanur:2011:MTC,
author = "Sandeep Budanur and Frank Mueller and Todd Gamblin",
title = "Memory Trace Compression and Replay for {SPMD} Systems
using Extended {PRSDs}?",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "30--36",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964224",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Rodrigues:2011:SST,
author = "A. F. Rodrigues and K. S. Hemmert and B. W. Barrett
and C. Kersey and R. Oldfield and M. Weston and R.
Riesen and J. Cook and P. Rosenfeld and E. Cooper-Balis
and B. Jacob",
title = "The {Structural Simulation Toolkit}",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "37--42",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964225",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Karlin:2011:PMP,
author = "Ian Karlin and Elizabeth Jessup and Geoffrey Belter
and Jeremy G. Siek",
title = "Parallel memory prediction for fused linear algebra
kernels",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "43--49",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964226",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Nakasato:2011:FGI,
author = "Naohito Nakasato",
title = "A fast {GEMM} implementation on the {Cypress GPU}",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "50--55",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964227",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Wu:2011:PCH,
author = "Xingfu Wu and Valerie Taylor",
title = "Performance characteristics of hybrid {MPI\slash
OpenMP} implementations of {NAS} parallel benchmarks
{SP} and {BT} on large-scale multicore supercomputers",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "56--62",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964228",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Hsieh:2011:FAL,
author = "Ming-yu Hsieh and Arun Rodrigues and Rolf Riesen and
Kevin Thompson and William Song",
title = "A framework for architecture-level power, area, and
thermal simulation and its application to
network-on-chip design exploration",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "63--68",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964229",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Perks:2011:SWW,
author = "O. Perks and S. D. Hammond and S. J. Pennycook and S.
A. Jarvis",
title = "Should we worry about memory loss?",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "69--74",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964230",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Cook:2011:SPM,
author = "Jeanine Cook and Jonathan Cook and Waleed Alkohlani",
title = "A statistical performance model of the {Opteron}
processor",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "75--80",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964231",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Tabbal:2011:PDE,
author = "Alexandre Tabbal and Matthew Anderson and Maciej
Brodowicz and Hartmut Kaiser and Thomas Sterling",
title = "Preliminary design examination of the {ParalleX}
system from a software and hardware perspective",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "81--87",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964232",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{McIntosh-Smith:2011:EAM,
author = "Simon McIntosh-Smith and Terry Wilson and Jon Crisp
and Amaurys {\'A}vila Ibarra and Richard B. Sessions",
title = "Energy-aware metrics for benchmarking heterogeneous
systems",
journal = j-SIGMETRICS,
volume = "38",
number = "4",
pages = "88--94",
month = mar,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/1964218.1964233",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Fri Apr 1 23:02:55 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
remark = "Special issue on the 1st international workshop on
performance modeling, benchmarking and simulation of
high performance computing systems (PMBS 10).",
}
@Article{Chen:2011:MPR,
author = "Jian Chen and Lizy Kurian John and Dimitris
Kaseridis",
title = "Modeling program resource demand using inherent
program characteristics",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "1--12",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007118",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sharifi:2011:MME,
author = "Akbar Sharifi and Shekhar Srikantaiah and Asit K.
Mishra and Mahmut Kandemir and Chita R. Das",
title = "{METE}: meeting end-to-end {QoS} in multicores through
system-wide resource management",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "13--24",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007119",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:SIC,
author = "Yuanrui Zhang and Mahmut Kandemir and Taylan Yemliha",
title = "Studying inter-core data reuse in multicores",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "25--36",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007120",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2011:SIH,
author = "Fang Liu and Yan Solihin",
title = "Studying the impact of hardware prefetching and
bandwidth partitioning in chip-multiprocessors",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "37--48",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007121",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alizadeh:2011:SAQ,
author = "Mohammad Alizadeh and Abdul Kabbani and Berk Atikoglu
and Balaji Prabhakar",
title = "Stability analysis of {QCN}: the averaging principle",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "49--60",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007123",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Joseph:2011:SNM,
author = "Vinay Joseph and Gustavo de Veciana",
title = "Stochastic networks with multipath flow control:
impact of resource pools on flow-level performance and
network congestion",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "61--72",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007124",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alizadeh:2011:ADS,
author = "Mohammad Alizadeh and Adel Javanmard and Balaji
Prabhakar",
title = "Analysis of {DCTCP}: stability, convergence, and
fairness",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "73--84",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007125",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suh:2011:SEB,
author = "Jinho Suh and Mehrtash Manoochehri and Murali
Annavaram and Michel Dubois",
title = "Soft error benchmarking of {L2} caches with {PARMA}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "85--96",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007127",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Suchara:2011:NAJ,
author = "Martin Suchara and Dahai Xu and Robert Doverspike and
David Johnson and Jennifer Rexford",
title = "Network architecture for joint failure recovery and
traffic engineering",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "97--108",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007128",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Subhraveti:2011:RTP,
author = "Dinesh Subhraveti and Jason Nieh",
title = "Record and transplay: partial checkpointing for replay
debugging across heterogeneous systems",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "109--120",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007129",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tsitsiklis:2011:PEL,
author = "John N. Tsitsiklis and Kuang Xu",
title = "On the power of (even a little) centralization in
distributed processing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "121--132",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007131",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2011:WPA,
author = "Thanh Nguyen and Milan Vojnovic",
title = "Weighted proportional allocation",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "133--144",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007132",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aalto:2011:OTB,
author = "Samuli Aalto and Aleksi Penttinen and Pasi Lassila and
Prajwal Osti",
title = "On the optimal trade-off between {SRPT} and
opportunistic scheduling",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "145--155",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007133",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cohen:2011:SAS,
author = "Edith Cohen and Graham Cormode and Nick Duffield",
title = "Structure-aware sampling on data streams",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "157--168",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007135",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Korada:2011:GP,
author = "Satish Babu Korada and Andrea Montanari and Sewoong
Oh",
title = "Gossip {PCA}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "169--180",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007136",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Urgaonkar:2011:OPC,
author = "Rahul Urgaonkar and Bhuvan Urgaonkar and Michael J.
Neely and Anand Sivasubramaniam",
title = "Optimal power cost management using stored energy in
data centers",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "181--192",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007138",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2011:GGL,
author = "Zhenhua Liu and Minghong Lin and Adam Wierman and
Steven H. Low and Lachlan L. H. Andrew",
title = "Greening geographical load balancing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "193--204",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007139",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nguyen:2011:SP,
author = "Giang T. K. Nguyen and Rachit Agarwal and Junda Liu
and Matthew Caesar and P. Brighten Godfrey and Scott
Shenker",
title = "Slick packets",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "205--216",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007141",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lam:2011:GRD,
author = "Simon S. Lam and Chen Qian",
title = "Geographic routing in $d$-dimensional spaces with
guaranteed delivery and low stretch",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "217--228",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007142",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rozner:2011:MDO,
author = "Eric Rozner and Mi Kyung Han and Lili Qiu and Yin
Zhang",
title = "Model-driven optimization of opportunistic routing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "229--240",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007143",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kurant:2011:WGM,
author = "Maciej Kurant and Minas Gjoka and Carter T. Butts and
Athina Markopoulou",
title = "Walking on a graph with a magnifying glass: stratified
sampling via weighted random walks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "241--252",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007145",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anandkumar:2011:TDS,
author = "Animashree Anandkumar and Avinatan Hassidim and
Jonathan Kelner",
title = "Topology discovery of sparse random graphs with few
participants",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "253--264",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007146",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiq:2011:CMI,
author = "M. Zubair Shafiq and Lusheng Ji and Alex X. Liu and
Jia Wang",
title = "Characterizing and modeling {Internet} traffic
dynamics of cellular devices",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "265--276",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007148",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2011:CDN,
author = "Qiang Xu and Junxian Huang and Zhaoguang Wang and Feng
Qian and Alexandre Gerber and Zhuoqing Morley Mao",
title = "Cellular data network infrastructure characterization
and implication on mobile content placement",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "277--288",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007149",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2011:FGL,
author = "Myungjin Lee and Sharon Goldberg and Ramana Rao
Kompella and George Varghese",
title = "Fine-grained latency and loss measurements in the
presence of reordering",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "289--300",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007150",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhou:2011:SOU,
author = "Xia Zhou and Stratis Ioannidis and Laurent Massouli{\'e}",
title = "On the stability and optimality of universal swarms",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "301--312",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007151",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eibl:2011:FBE,
author = "Patrick J. Eibl and Albert Meixner and Daniel J.
Sorin",
title = "An {FPGA}-based experimental evaluation of
microprocessor core error detection with {Argus-2}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "313--314",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007153",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:RKD,
author = "Lele Zhang and Darryl Veitch and Kotagiri
Ramamohanarao",
title = "The role of {KL} divergence in anomaly detection",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "315--316",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007154",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krevat:2011:AIL,
author = "Elie Krevat and Tomer Shiran and Eric Anderson and
Joseph Tucek and Jay J. Wylie and Gregory R. Ganger",
title = "Applying idealized lower-bound runtime models to
understand inefficiencies in data-intensive computing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "317--318",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007155",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Han:2011:HPC,
author = "Jinyoung Han and Taejoong Chung and Seungbae Kim and
Ted Taekyoung Kwon and Hyun-chul Kim and Yanghee Choi",
title = "How prevalent is content bundling in {BitTorrent}?",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "319--320",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007156",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rao:2011:SAP,
author = "Jia Rao and Xiangping Bu and Kun Wang and Cheng-Zhong
Xu",
title = "Self-adaptive provisioning of virtualized resources in
cloud computing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "321--322",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007157",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2011:CAR,
author = "Chao Li and Amer Qouneh and Tao Li",
title = "Characterizing and analyzing renewable energy driven
data centers",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "323--324",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007158",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2011:TMB,
author = "Varun Gupta and Takayuki Osogami",
title = "Tight moments-based bounds for queueing systems",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "325--326",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007159",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2011:SMT,
author = "Suk-Bok Lee and Dan Pei and MohammadTaghi Hajiaghayi
and Ioannis Pefkianakis and Songwu Lu and He Yan and
Zihui Ge and Jennifer Yates and Mario Kosseifi",
title = "Scalable monitoring via threshold compression in a
large operational {$3$G} network",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "327--328",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007160",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Adhikari:2011:HDY,
author = "Vijay Kumar Adhikari and Sourabh Jain and Yingying
Chen and Zhi-Li Zhang",
title = "How do you ``{Tube}''?",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "329--330",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007161",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kant:2011:CSB,
author = "Krishna Kant",
title = "A control scheme for batching {DRAM} requests to
improve power efficiency",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "331--332",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007162",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:ONS,
author = "Hao Zhang and Ziyu Shao and Minghua Chen and Kannan
Ramchandran",
title = "Optimal neighbor selection in {BitTorrent}-like
peer-to-peer networks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "333--334",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007163",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ihm:2011:TUM,
author = "Sunghwan Ihm and Vivek S. Pai",
title = "Towards understanding modern {Web} traffic",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "335--336",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007164",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akella:2011:OIR,
author = "Aditya Akella and Shuchi Chawla and Holly Esquivel and
Chitra Muthukrishnan",
title = "De-ossifying {Internet} routing through intrinsic
support for end-network and {ISP} selfishness",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "337--338",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007165",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hong:2011:DSP,
author = "Yu-Ju Hong and Jiachen Xue and Mithuna Thottethodi",
title = "Dynamic server provisioning to minimize cost in an
{IaaS} cloud",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "339--340",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007166",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Srinivasan:2011:HHA,
author = "Sadagopan Srinivasan and Ravishankar Iyer and Li Zhao
and Ramesh Illikkal",
title = "{HeteroScouts}: hardware assist for {OS} scheduling in
heterogeneous {CMPs}",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "341--342",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007167",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ribeiro:2011:CCT,
author = "Bruno Ribeiro and Daniel Figueiredo and Edmundo {de
Souza e Silva} and Don Towsley",
title = "Characterizing continuous-time random walks on dynamic
networks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "343--344",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007168",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2011:AAN,
author = "Jian Chen and Lizy Kurian John",
title = "Autocorrelation analysis: a new and improved method
for measuring branch predictability",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "345--346",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007169",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2011:IGM,
author = "Satinder Pal Singh and Randolph Baden and Choon Lee
and Bobby Bhattacharjee and Richard La and Mark
Shayman",
title = "{IP} geolocation in metropolitan areas",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "347--348",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007170",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2011:TBS,
author = "Jay Chen and Janardhan Iyengar and Lakshminarayanan
Subramanian and Bryan Ford",
title = "{TCP} behavior in sub packet regimes",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "349--350",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007171",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bowden:2011:NLT,
author = "Rhys Alistair Bowden and Matthew Roughan and Nigel
Bean",
title = "Network link tomography and compressive sensing",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "351--352",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007172",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulati:2011:STM,
author = "Ajay Gulati and Irfan Ahmad",
title = "Storage technologies, management and troubleshooting
in virtualized datacenters",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "353--354",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007174",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sengupta:2011:CDC,
author = "Sudipta Sengupta",
title = "Cloud data center networks: technologies, trends, and
challenges",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "355--356",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007175",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2011:BAW,
author = "Giuliano Casale",
title = "Building accurate workload models using {Markovian}
arrival processes",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "357--358",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007176",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ciucu:2011:NAC,
author = "Florin Ciucu",
title = "Non-asymptotic capacity and delay analysis of mobile
wireless networks",
journal = j-SIGMETRICS,
volume = "39",
number = "1",
pages = "359--360",
month = jun,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2007116.2007177",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
bibdate = "Thu Aug 18 14:31:37 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elmokashfi:2011:SSI,
author = "Ahmed Elmokashfi and Amund Kvalbein and Constantine
Dovrolis",
title = "{SIMROT}: a scalable inter-domain routing toolbox",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "4--13",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034834",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sen:2011:CIH,
author = "Aritra Sen and Ankit Garg and Akshat Verma and Tapan
Nayak",
title = "{CloudBridge}: on integrated hardware-software
consolidation",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "14--25",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034835",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nair:2011:ENE,
author = "Jayakrishnan Nair and Adam Wierman and Bert Zwart",
title = "Exploiting network effects in the provisioning of
large scale systems",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "26--28",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034837",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nightingale:2011:PES,
author = "James Nightingale and Qi Wang and Christos Grecos",
title = "Performance evaluation of scalable video streaming in
multihomed mobile networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "29--31",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034838",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bouman:2011:DPB,
author = "N. Bouman and S. C. Borst and J. S. H. van
Leeuwaarden",
title = "Delay performance of backlog based random access",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "32--34",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034839",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shneer:2011:CSC,
author = "Seva Shneer and Peter M. van de Ven",
title = "Comparing slotted and continuous {CSMA}: throughputs
and fairness",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "35--37",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034840",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shvets:2011:AMI,
author = "Evgeny Shvets and Andrey Lyakhov and Alexander Safonov
and Evgeny Khorov",
title = "Analytical model of {IEEE 802.11s MCCA}-based streaming
in the presence of noise",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "38--40",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034841",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ayesta:2011:HTA,
author = "U. Ayesta and A. Izagirre and I. M. Verloop",
title = "Heavy traffic analysis of the discriminatory
random-order-of-service discipline",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "41--43",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034842",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Boon:2011:QNS,
author = "M. A. A. Boon and R. D. van der Mei and E. M. M.
Winands",
title = "Queueing networks with a single shared server: light
and heavy traffic",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "44--46",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034843",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Frolkova:2011:FPA,
author = "Maria Frolkova and Josh Reed and Bert Zwart",
title = "Fixed-point approximations of bandwidth sharing
networks with rate constraints",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "47--49",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034844",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cano:2011:IPF,
author = "Maria Dolores Cano",
title = "Improving path failure detection in {SCTP} using
adaptive heartbeat time intervals",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "50--52",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034845",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Varis:2011:NSB,
author = "Nuutti Varis and Jukka Manner",
title = "In the network: {Sandy Bridge} versus {Nehalem}",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "53--55",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anselmi:2011:EPS,
author = "Jonatha Anselmi and Bruno Gaujal",
title = "On the efficiency of perfect simulation in monotone
queueing networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "56--58",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034847",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baryshnikov:2011:CLD,
author = "Y. M. Baryshnikov and E. G. Coffman and K. J. Kwak",
title = "{CAUCHY} localization: a distributed computation of
{WSNs}",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "59--61",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034848",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Goga:2011:IFS,
author = "Oana Goga and Patrick Loiseau and Paulo
Gon{\c{c}}alves",
title = "On the impact of the flow size distribution's tail
index on network performance with {TCP} connections",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "62--64",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2011:LBP,
author = "B. {Van Houdt}",
title = "Load balancing and the power of preventive probing",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "65--67",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Altman:2011:PAC,
author = "Eitan Altman and Rachid {El Azouzi} and Daniel S.
Menasch{\'e} and Yuedong Xu",
title = "Poster: Aging control for smartphones in hybrid
networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "68--68",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bokharaei:2011:PTN,
author = "Hossein Kaffash Bokharaei and Yashar Ganjali and Ram
Keralapura and Antonio Nucci",
title = "Poster: Telephony network characterization for spammer
identification",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "69--69",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034853",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bosman:2011:POD,
author = "Joost Bosman and Rob van der Mei and Gerard Hoekstra",
title = "Poster: Optimal dispatching policies for parallel
processor sharing nodes with partial information",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "70--70",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034854",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dong:2011:PPS,
author = "Ke Dong and Diptanil DebBarma and R. Venkatesha
Prasad and Cheng Guo",
title = "Poster: Performance study of clustering of {Zigbee}
devices in {OPNET}",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "71--71",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034855",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lubben:2011:PCD,
author = "Ralf L{\"u}bben and Markus Fidler",
title = "Poster: On the capacity delay error tradeoff of source
coding",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "72--72",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034856",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Marbukh:2011:PTE,
author = "Vladimir Marbukh",
title = "Poster: {TCP} effective bandwidth and {Internet}
performance",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "73--73",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034857",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Massey:2011:PSV,
author = "William A. Massey and Jamol Pender",
title = "Poster: Skewness variance approximation for dynamic
rate {MultiServer} queues with abandonment",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "74--74",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034858",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rahman:2011:PGF,
author = "Ashikur Rahman and Carey Williamson",
title = "Poster: {$ \Delta $}-Graphs: flexible topology control
in wireless ad hoc networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "75--75",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034859",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rahman:2011:PCM,
author = "Ashikur Rahman and Hanan Shpungin and Carey
Williamson",
title = "Poster: On capacity maximization in wireless relay
networks",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "76--76",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034860",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Romano:2011:PSB,
author = "Paolo Romano and Matteo Leonetti",
title = "Poster: Self-tuning batching in total order broadcast
via analytical modelling and reinforcement learning",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "77--77",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034861",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yan:2011:PDV,
author = "Zhichao Yan and Dan Feng and Yujuan Tan",
title = "Poster: Dissection the version management schemes in
hardware transactional memory systems",
journal = j-SIGMETRICS,
volume = "39",
number = "2",
pages = "78--78",
month = sep,
year = "2011",
DOI = "https://doi.org/10.1145/2034832.2034862",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sat Oct 22 08:04:31 MDT 2011",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
note = "Special Issue on IFIP PERFORMANCE 2011- 29th
International Symposium on Computer Performance,
Modeling, Measurement and Evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Czekster:2011:EVD,
author = "Ricardo M. Czekster and Paulo Fernandes and Thais
Webber",
title = "Efficient vector-descriptor product exploiting
time-memory trade-offs!",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "2--9",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lilja:2011:PAS,
author = "David J. Lilja and Raffaela Mirandola and Kai Sachs",
title = "Paper abstracts of the second international conference
on performance engineering ({ICPE 2011})",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "2--9",
month = dec,
year = "2011",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Squillante:2011:IBT,
author = "Mark S. Squillante",
title = "Instrumentation-based tool for latency measurements
(abstracts only)",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "20--20",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160846",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papadimitriou:2011:PVR,
author = "Dimitri Papadimitriou and Florin Coras and Albert
Cabellos",
title = "Path-vector routing stability analysis",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "22--24",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160848",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhao:2011:DAS,
author = "Haotian Zhao and Yinlong Xu",
title = "A deterministic algorithm of single failed node
recovery in {MSR}-based distributed storage systems",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "25--27",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160849",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Garg:2011:RHD,
author = "Siddharth Garg and Shreyas Sundaram and Hiren D.
Patel",
title = "Robust heterogeneous data center design: a principled
approach",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "28--30",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160850",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tizghadam:2011:RWN,
author = "Ali Tizghadam and Alberto Leon-Garcia and Hassan
Naser",
title = "On robust wireless network optimization using network
criticality",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "31--33",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160851",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lelarge:2011:DCB,
author = "Marc Lelarge",
title = "Diffusion and cascading behavior in random networks",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "34--36",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160852",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abdelrahman:2011:SNH,
author = "Omer H. Abdelrahman and Erol Gelenbe",
title = "Search in non-homogeneous random environments?",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "37--39",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160853",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Feng:2011:EPQ,
author = "Hanhua Feng and Parijat Dube and Li Zhang",
title = "On estimation problems for the {$ G / G / \infty $}
Queue",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "40--42",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160854",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Doroudi:2011:DIF,
author = "Sherwin Doroudi and Ragavendran Gopalakrishnan and
Adam Wierman",
title = "Dispatching to incentivize fast service in
multi-server queues",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "43--45",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160855",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Akgun:2011:PPP,
author = "Osman T. Akgun and Rhonda Righter and Ronald Wolff",
title = "The power of partial power of two choices",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "46--48",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160856",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pal:2011:SLQ,
author = "Ranjan Pal and Sokol Kosta and Pan Hui",
title = "Settling for less: a {QoS} compromise mechanism for
opportunistic mobile networks",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "49--51",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160857",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2011:IEN,
author = "Zichao Yang and John C. S. Lui",
title = "Investigating the effect of node heterogeneity and
network externality on security adoption",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "52--54",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160858",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2011:IPS,
author = "Daniel Sadoc Menasch{\'e} and Antonio A. de A. Rocha
and Edmundo A. {de Souza e Silva} and Don Towsley and
Rosa M. Meri Le{\"a}o",
title = "Implications of peer selection strategies by
publishers on the performance of {P2P} swarming
systems",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "55--57",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160859",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aalto:2011:HIA,
author = "Samuli Aalto and Pasi Lassila and Petri Savolainen and
Sasu Tarkoma",
title = "How impatience affects the performance and scalability
of {P2P} video-on-demand systems",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "58--60",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160860",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arlitt:2011:PGW,
author = "Martin Arlitt and Niklas Carlsson and Jerry Rolia",
title = "{Proceedings of the 2011 GreenMetrics} workshop",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "58--60",
month = dec,
year = "2011",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2011:GLB,
author = "Zhenhua Liu and Minghong Lin and Adam Wierman and
Steven H. Low and Lachlan L. H. Andrew",
title = "Geographical load balancing with renewables",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "62--66",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160862",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Altman:2011:TGC,
author = "Eitan Altman and Manjesh K. Hanawal and Rachid
ElAzouzi and Shlomo Shamai",
title = "Tradeoffs in green cellular networks",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "67--71",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160863",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sucevic:2011:PEE,
author = "Andrew Sucevic and Lachlan L. H. Andrew and Thuy T. T.
Nguyen",
title = "Powering down for energy efficient peer-to-peer file
distribution",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "72--76",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160864",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Brown:2011:RPS,
author = "Michael Brown and Jose Renau",
title = "{ReRack}: power simulation for data centers with
renewable energy generation",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "77--81",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160865",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yan:2011:CRS,
author = "Feng Yan and Xenia Mountrouidou and Alma Riska and
Evgenia Smirni",
title = "Copy rate synchronization with performance guarantees
for work consolidation in storage clusters",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "82--86",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160866",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gupta:2011:APR,
author = "Vishal Gupta and Ripal Nathuji and Karsten Schwan",
title = "An analysis of power reduction in datacenters using
heterogeneous chip multiprocessors",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "87--91",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160867",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2011:HSS,
author = "Giuliano Casale and Ioan Raicu",
title = "{HPDC\slash SIGMETRICS} student research posters",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "92--96",
month = dec,
year = "2011",
CODEN = "????",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chen:2011:UCG,
author = "Doron Chen and Ealan Henis and Ronen I. Kat and Dmitry
Sotnikov and Cinzia Cappiello and Alexandre Mello
Ferreira and Barbara Pernici and Monica Vitali and Tao
Jiang and Jia Liu and Alexander Kipp",
title = "Usage centric green performance indicators",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "92--96",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160868",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2011:BBH,
author = "Yuanrui Zhang and Jun Liu and Sai Prashanth
Muralidhara and Mahmut Kandemir",
title = "{BrickX}: building hybrid systems for recursive
computations",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "98--100",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160870",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blackburn:2011:CGS,
author = "Jeremy Blackburn and Ramanuja Simha and Clayton Long
and Xiang Zuo and Nicolas Kourtellis and John Skvoretz
and Adriana Iamnitchi",
title = "Cheaters in a gaming social network",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "101--103",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160871",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stefanek:2011:FCP,
author = "Anton Stefanek and Richard A. Hayden and Jeremy T.
Bradley",
title = "Fluid computation of the performance: energy tradeoff
in large scale {Markov} models",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "104--106",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160872",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2011:IHP,
author = "Shingyu Kim and Junghee Won and Hyuck Han and
Hyeonsang Eom and Heon Y. Yeom",
title = "Improving {Hadoop} performance in intercloud
environments",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "107--109",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160873",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2011:IPE,
author = "Yong Oh Lee",
title = "Improving performance and energy savings through
alternative forwarding",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "110--112",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160874",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Choi:2011:IPM,
author = "Seungmi Choi and Shingyu Kim and Hyuck Han and Heon Y.
Yeom",
title = "Improving performance of {MapReduce} framework on
{InterCloud} by avoiding transmission of unnecessary
data",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "113--115",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160875",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gadre:2011:IMF,
author = "Hrishikesh Gadre and Ivan Rodero and Manish Parashar",
title = "Investigating {MapReduce} framework extensions for
efficient processing of geographically scattered
datasets",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "116--118",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160876",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hayden:2011:MFA,
author = "Richard A. Hayden",
title = "Mean-field approximations for performance models with
generally-timed transitions",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "119--121",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160877",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gandhi:2011:MMV,
author = "Rohan Gandhi and Dimitrios Koutsonikolas and Y.
Charlie Hu",
title = "Multicasting {MDC} videos to receivers with different
screen resolution",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "122--124",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160878",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sawalha:2011:TSH,
author = "Lina Sawalha and Monte P. Tull and Ronald D. Barnes",
title = "Thread scheduling for heterogeneous multicore
processors using phase identification",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "125--127",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160879",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Li:2011:EDH,
author = "Tonglin Li and Raman Verma and Xi Duan and Hui Jin and
Ioan Raicu",
title = "Exploring distributed hash tables in {HighEnd}
computing",
journal = j-SIGMETRICS,
volume = "39",
number = "3",
pages = "128--130",
month = dec,
year = "2011",
CODEN = "????",
DOI = "https://doi.org/10.1145/2160803.2160880",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Thu Mar 15 10:13:16 MDT 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Burdette:2012:ECJ,
author = "Philip F. Burdette and William F. Jones and Brian C.
Blose and Gregory M. Kapfhammer",
title = "An empirical comparison of {Java} remote communication
primitives for intra-node data transmission",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "2--11",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185397",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents a benchmarking suite that measures
the performance of using sockets and eXtensible Markup
Language remote procedure calls (XML-RPC) to exchange
intra-node messages between Java virtual machines
(JVMs). The paper also reports on an empirical study
comparing sockets and XML-RPC with response time
measurements from timers that use both operating system
tools and Java language instrumentation. By leveraging
packet filters inside the GNU/Linux kernel, the
benchmark suite also calculates network resource
consumption. Moreover, the framework interprets the
response time results in light of memory subsystem
metrics characterizing the behavior of the JVM. The
empirical findings indicate that sockets perform better
when transmitting small to very large objects, while
XML-RPC exhibits lower response time than sockets with
extremely large bulk data transfers. The experiments
reveal trade-offs in performance and thus represent the
first step towards determining if Java remote
communication primitives can support the efficient
exchange of intra-node messages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gopalakrishnan:2012:SUT,
author = "Sathish Gopalakrishnan",
title = "Sharp utilization thresholds for some realtime
scheduling problems",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "12--22",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185398",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling policies for real-time systems exhibit
threshold behavior that is related to the utilization
of the task set they schedule, and in some cases this
threshold is sharp. A task set is considered
schedulable if it can be scheduled to meet all
associated deadlines. A schedulability test for a
chosen policy is a test of feasibility: given a task
set, can all deadlines be met? For the rate monotonic
scheduling policy, we show that periodic workload with
utilization less than a threshold U$_{RM}$ can be
scheduled almost surely and that all workload with
utilization greater than U$_{RM}$ is almost surely not
schedulable. We study such sharp threshold behavior in
the context of processor scheduling using static task
priorities, not only for periodic real-time tasks but
for aperiodic real-time tasks as well. The notion of a
utilization threshold provides a simple schedulability
test for most real-time applications. These results
improve our understanding of scheduling policies and
provide an interesting characterization of the typical
behavior of policies. The threshold is sharp (small
deviations around the threshold cause schedulability,
as a property, to appear or disappear) for most
policies; this is a happy consequence that can be used
to address the limitations of existing
utilization-based tests for schedulability. We
demonstrate the use of such an approach for balancing
power consumption with the need to meet deadlines in
web servers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2012:SLR,
author = "Edward G. Coffman",
title = "Synthesis of local-rule processes: successes and
challenges (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185400",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "How does one systematically program global
computations in systems of a vast number of components
restricted to local-rule interaction in a flat
hierarchy? This question has been around since the 50's
when cellular automata were introduced as models of
such systems. The question posed here is known as the
synthesis problem, and remains poorly understood. Terms
like self-assembling and self-organizing are often used
to describe computations on such systems. We mention a
number of instances of local-rule processes at widely
different scales in computer and network engineering:
molecular computation, sensor-network computation, and
computation on the Web. Typical performance questions
that we address include the convergence to useful,
non-degenerate behavior: does it always occur, and if
so, how long does it take.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kou:2012:FPT,
author = "Steven S. G. Kou",
title = "First passage times and option pricing under a
mixed-exponential jump diffusion model (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185401",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper aims at extending the analytical
tractability of the Black--Scholes model to alternative
models with arbitrary jump size distributions. More
precisely, we propose a jump diffusion model for asset
prices whose jump sizes have a mixed-exponential
distribution, which is a weighted average of
exponential distributions but with possibly negative
weights. The new model extends existing models, such as
hyper-exponential and double-exponential jump diffusion
models, as the mixed-exponential distribution can
approximate any distribution as closely as possible,
including the normal distribution and various
heavy-tailed distributions. The mixed-exponential jump
diffusion model can lead to analytical solutions for
Laplace transforms of prices and sensitivity parameters
for path-dependent options such as lookback and barrier
options. The Laplace transforms can be inverted via the
Euler inversion algorithm. Numerical experiments
indicate that the formulae are easy to implement and
accurate. The analytical solutions are made possible
mainly because we solve a high-order
integro-differential equation related to first passage
times explicitly. A calibration example for SPY
options shows that the model can provide a reasonable
fit even for options with very short maturity, such as
one day. This is a joint work with Ning Cai at Hong
Kong Univ. of Science and Technology.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Neuts:2012:AMS,
author = "Marcel F. Neuts",
title = "The algorithmization of mathematics: the story of
stochastic models (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185402",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2012:PFD,
author = "Devavrat Shah",
title = "Product-form distributions and network algorithms
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "24--24",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185403",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The ``product-form'' characterization of the
stationary distribution make a queueing network
analytically a lot more tractable. This has been the
primary source of inspiration in the search for
``product-form'' characterization. In this talk, I will
discuss implications of ``product-form'' distributions
for algorithm design by means of two examples: (i)
intra-queue scheduling and (ii) inter-queue scheduling
in a constrained queueing network. Near the end of the
talk, by means of a novel comparison result between
stationary distributions of Markov chains, I will
briefly discuss notion of ``approximate'' product-form
distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Baek:2012:FPM,
author = "Jung Woo Baek and Ho Woo Lee and Se Won Lee and Soohan
Ahn",
title = "Factorization properties for a {MAP}-modulated fluid
flow model under server vacation policies (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "25--25",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185404",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we study a MAP-modulated fluid flow
model under generalized server vacation policies and
propose factorization properties that can be
efficiently used to derive the fluid level
distributions at an arbitrary time point. Our model is
an extension of the conventional Markov modulated fluid
flow (MMFF) model to control the server's idle state. We
consider two types of fluid increases: vertical
increase (Type-V) and linear increase (Type-L). We
first describe the MAP-modulated fluid flow model under
server vacation policies and prove the factorization
principle for each type. Based on the factorization
formulae, we derive recursive formulae for performance
measures. Lastly, some application examples of the
factorization property are presented.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bladt:2012:BME,
author = "Mogens Bladt and Luz Judith R. Esparza and Bo Friis
Nielsen",
title = "Bilateral matrix-exponential distributions (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "25--25",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185405",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this article we define the classes of bilateral and
multivariate bilateral matrix-exponential
distributions. These distributions have support on the
entire real space and have rational moment-generating
functions. These distributions extend the class of
bilateral phase-type distributions of [1] and the class
of multivariate matrix-exponential distributions of
[9]. We prove a characterization theorem stating that a
random variable has a multivariate bilateral
matrix-exponential distribution if and only if all linear combinations of
the coordinates have a univariate bilateral
matrix-exponential distribution. As an application we
demonstrate that certain multivariate divisions, which
are governed by the underlying Markov jump process
generating a phase-type distribution, have a bilateral
matrix-exponential distribution at the time of
absorption, see also [4].",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bladt:2012:MDP,
author = "Mogens Bladt and Bo Friis Nielsen",
title = "Moment distributions of phase-type (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "25--26",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185406",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Both matrix-exponential and phase-type distributions
have a number of important closure properties. Among
those are the distributions of the age and residual
life-time of a stationary renewal process with
inter-arrivals of either type. In this talk we show
that the spread, which is the sum of the age and
residual life-time, is also phase-type distributed.
Moreover, we give some explicit representations. The
spread is known to have a first order moment
distribution. If $X$ is a positive random variable and
$ \mu_i $ is its $i$'th moment, then the function $ f_i(x)
= x^i f(x) / \mu_i $ is a density function, and the
corresponding distribution is called the $i$'th order
moment distribution.\par
We prove that the classes of matrix-exponential or
phase-type distributions are closed under the formation
of moment distributions of any order. Other
distributions which are closed under the formation of
moment distributions are e.g., log-normal, Pareto and
gamma distributions. We provide explicit
representations for both the matrix-exponential class
and for the phase-type distributions, where the latter
class may also use the former representations, but for
various reasons it is desirable to establish a
phase-type representation when dealing with phase-type
distributions.\par
For the first order distribution we present an explicit
formula for the related Lorenz curve and Gini index.
Moment distributions of orders one, two and three have
been extensively used in areas such as economy,
physics, demography and civil engineering.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Drekic:2012:SPP,
author = "Steve Drekic and David Stanford and Douglas Woolford",
title = "A self-promoting priority model for transplant queues
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "26--26",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185407",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In many jurisdictions, organ allocation is done on the
basis of the health status of the patient, either
explicitly or implicitly. This presentation describes a
matrix-analytic priority model in which customers
self-promote to the higher priority level, to take into
account changes in health status over time. In the
first variant, all patients arrive as ``regular''
customers to the queue, but as the health of a patient
degrades, their status is promoted to ``priority'' to
reflect the increased urgency of the transplant.
Performance measures such as the mean and distribution
of the time until transplant are obtained.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Fackrell:2012:CME,
author = "Mark Fackrell",
title = "Characterizing matrix-exponential distributions of
order $4$ (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "26--26",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185408",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Laplace--Stieltjes transform of a
matrix-exponential distribution is a rational function.
If there are no common factors between the numerator
and denominator polynomials, then the order of the
matrix-exponential distribution is the degree of the
denominator polynomial. Given a rational
Laplace--Stieltjes transform, it is unknown, in
general, when it corresponds to a matrix-exponential
distribution. Matrix-exponential distributions of order
3 have been completely characterized in this manner,
but in this talk we look at the problem of
characterizing matrix-exponential distributions of
order 4.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hautphenne:2012:EAM,
author = "Sophie Hautphenne",
title = "An {EM} algorithm for the model fitting of {Markovian}
binary trees (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "26--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185409",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Markovian binary trees are a special class of
branching processes in which the lifetime of an
individual is controlled by a transient Markovian
arrival process. A Markovian binary tree is
characterized by the 4-tuple $ (\alpha, D_0, B, d) $, where
$ \alpha $ is the vector of initial phase distribution of the first
individual, $ D_0 $ is the matrix of phase transition rates
between birth and death events, $B$ is the matrix of
birth rates and $d$ is the vector of death rates. In
order to use the Markovian binary tree to model the
evolution of a real population, we need to determine
the parameters $ (\alpha, D_0, B, d) $ from observations of that
population. In the absence of migration, the only
observable changes in a population are those associated
with a birth or a death event; no phase transition in
the underlying process can actually be seen. We are
thus dealing with a problem of parameter estimation
from incomplete data, and one way to solve this
statistical problem is to make use of the EM algorithm.
Our purpose here is thus to specify this algorithm to
the Markovian binary tree setting. In the first part of
this paper, we introduce a discrete time terminating
marked Markov arrival process (MMAP), based on which a
class of discrete multivariate phase-type (MPH)
distributions is defined. The discrete
MPH-distributions hold many of the properties possessed
by continuous MPH-distributions (Assaf, et al. (1983),
Kulkarni (1988), and O'Cinneide (1990)). It is known
that the joint distribution functions of continuous MPH
are fairly complicated and difficult to calculate. In
contrast, for the discrete MPH introduced here, we
provide recursive formulas for the joint probabilities and
explicit expressions for means, variances, and
co-variances.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hautphenne:2012:MTS,
author = "Sophie Hautphenne and Guy Latouche and Giang T.
Nguyen",
title = "{Markovian} trees subject to catastrophes: would they
survive forever? (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185410",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider multi-type Markovian branching processes
subject to catastrophes which kill random numbers of
living individuals at random epochs. It is well known
that the criteria for extinction of such a process is
related to the conditional growth rate of the
population, given the history of the process of
catastrophes, and that it is usually hard to evaluate.
We give a simple characterization in the case when all
individuals have the same probability of surviving a
catastrophe, and we determine upper and lower bounds in
the case where survival depends on the type of the
individual. The upper bound appears to be often much
tighter than the lower bound.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2012:DMV,
author = "Qi-Ming He and Jiandong Ren",
title = "On a discrete multi-variate phase-type distribution
and its applications (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185411",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the second part of this paper, we use the discrete
MPH-distributions to model multi-variate insurance
claim processes in risk analysis, where claims may
arrive in batches, the arrivals of different types of
batches may be correlated, and the amounts of different
types of claims in a batch may be dependent. This
provides one natural approach to model the dependencies
among claim frequencies as well as claim sizes of
different types of risks, which is a very important
topic in insurance risk theory. Under certain
conditions, it is shown that the total amounts of
claims accumulated in some random time horizon are
discrete MPH random vectors. Matrix representations of
the discrete MPH-distributions are constructed
explicitly. Efficient computational methods are
developed for computing performance measures of the
total claims of different types of claim batches and
individual types of claims.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{He:2012:MEP,
author = "Qi-Ming He and Hanqin Zhang and Juan Vera",
title = "Majorization and {Extremal PH}-Distributions (abstract
only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185412",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents majorization results for PH
-generators. Based on the majorization results, Coxian
distributions are identified to be extremal PH
-distributions with respect to the first moment for
certain subsets of PH -distributions. Bounds on the
mean of phase-type distributions are found. In
addition, numerical results indicate that Coxian
distributions are extremal PH -distributions with
respect to the moment of any order for certain subsets
of PH -distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Horvath:2012:ARM,
author = "G{\'a}bor Horv{\'a}th and Mikl{\'o}s Telek",
title = "Acceptance-rejection methods for generating random
variates from matrix exponential distributions and
rational arrival processes (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "27--27",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185413",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Stochastic models based on matrix exponential
structures, like matrix exponential distributions and
rational arrival processes, have gained popularity in
analytical models recently. However the application of
these models in simulation based evaluations is not as
widespread yet. One of the possible reasons is the lack
of efficient random variates generation methods. In
this paper we propose methods for efficient random
variates generation for matrix exponential stochastic
models based on appropriate representations of the
models.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kobayashi:2012:TAS,
author = "Masahiro Kobayashi and Yutaka Sakuma and Masakiyo
Miyazawa",
title = "Tail asymptotics of the stationary distribution for
{M/M-JSQ} with $k$ parallel queues (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "28--28",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185414",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a parallel queueing model which has k
identical servers. Assume that customers arrive from
outside according to a Poisson process and join the
shortest queue. Their service times have an i.i.d.
exponential distribution, which is referred to as an
M/M-JSQ with k parallel queues. We are interested in the
asymptotic behavior of the stationary distribution for
the shortest queue length of this model, provided the
stability is assumed. For this stationary distribution,
it can be conjectured that the tail decay rate
is given by the k-th power of the traffic intensity of
the corresponding M/M/k queue with a single waiting
line. We prove this fact by obtaining the exactly
geometric asymptotics. For this, we use two
formulations. One is a quasi-birth-and-death (QBD for
short) process which is typically used, and the other
is a reflecting random walk on the boundary of the k +
1-dimensional orthant which is a key for our proof.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Krishnamoorthy:2012:SDP,
author = "A. Krishnamoorthy and Viswanath C. Narayanan",
title = "Stochastic decomposition in production inventory with
service time (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "28--28",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185415",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study an $ (s, S) $ inventory system with positive
service time (for an overview of the work reported so
far in inventory with positive service time one may
refer to Krishnamoorthy, Lakshmi and Manikandan: A
survey on inventory models with positive service time,
OPSEARCH, DOI 10.1007/s12597-010-0032-z). This leads to
a queue of demands being formed. The process of demand
arrival constitutes a Poisson process. The duration of
each service is exponentially distributed. Our model is
a supply chain where items are added to the inventory
through a production process. This starts each time the
inventory level goes down to $s$ and continues to be on
until inventory level reaches $S$ with the time
required to add one unit of the item into the inventory
when the production is on, are independent, identically
distributed exponential random variables. Further all
distributions involved in this paper are assumed to be
mutually independent. We assume that no customer joins
the queue when the inventory level is $0$. This
assumption leads us to an explicit product form
solution for the steady state probability vector, using
a simple approach. This is despite the fact that there
is a strong correlation between lead time (the time
required to add an item into the inventory) and the
number of customers joining the queue during the lead
time (except when the inventory level is zero during
which time no customer joins the queue). The technique
is to combine the steady state probability vector of
the classical M/M/1 queue and that of the production
inventory system where each service requires negligible
time and no backlogs are allowed. Using a similar
technique, the expected length of a production cycle is
also obtained explicitly. The optimality of the highest
inventory level $S$ and the production switching on
level $s$ has been studied using a cost function
constructed using the steady state system performance
measures. Since we have obtained explicit expressions
for these measures, analytic expressions have been
derived for the optimal values of $S$ and $s$.\par
To show that our method can be applied to other similar
problems, we analyze in detail a variant of the above
problem (discussed in Schwarz M, Sauer C, Daduna H,
Kulik R and Szekli R: M/M/1 Queueing systems with
inventory, {\em Queueing Systems}, 54, 55--78, 2006).
For that model, we assume that in a production run,
production occurs only once in a cycle and the amount
produced is sufficient to take the inventory level back
to $S$. A brief discussion on the application of our
method to inventory system with lead time for
replenishment has also been provided.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Latouche:2012:TDF,
author = "Guy Latouche and Giang T. Nguyen and Zbigniew
Palmowski",
title = "Two-dimensional fluid queues with temporary assistance
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "28--28",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185416",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a two-dimensional stochastic fluid model
with N ON-OFF inputs and temporary assistance, which is
an extension of the same model with N = 1 in
Mahabhashyam et al. (2008). The rates of change of both
buffers are piecewise constant and dependent on the
underlying Markovian phase of the model, and the rates
of change for Buffer 2 are also dependent on the
specific level of Buffer 1. This is because both
buffers share a fixed output capacity, the precise
proportion of which depends on Buffer 1. The
generalization of the number of ON-OFF inputs
necessitates modifications in the original rules of
output-capacity sharing from Mahabhashyam et al. (2008)
and considerably complicates both the theoretical
analysis and the numerical computation of various
performance measures. We derive the marginal
probability distribution of Buffer 1, and bounds for
that of Buffer 2. Furthermore, restricting Buffer 1 to
a finite size, we determine its marginal probability
distribution in the specific case of N = 1, thus
providing numerical comparisons to the corresponding
results in Mahabhashyam et al. (2008) where Buffer 1 is
assumed to be infinite.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramaswami:2012:FIB,
author = "V. Ramaswami",
title = "A fluid introduction to {Brownian} motion \&
stochastic integration (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--29",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185417",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This presentation provides an introduction to Brownian
motion and stochastic integrals using linear fluid
flows on finite state Markov chains. Many numerical
examples are presented setting the stage for the
development of algorithms for stochastic integration
via the well-studied and easily understood fluid flow
models driven by finite state Markov chains.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sonenberg:2012:NFM,
author = "Nikki Sonenberg and Peter G. Taylor",
title = "A network of fluid models and its application in
{MANETs} (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--29",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185418",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Ad hoc mobile networks are peer-to-peer systems whose
successful operation depends on the nodes contributing
the resources of their device. Nodes rely on portable
energy sources, for example batteries, to transmit to
each other. For the network to function, either the
nodes willingly cooperate or their behaviour is
influenced by an incentive mechanism. Building on work
by Latouche and Taylor (2009) and assuming finite
capacity buffers, we model each user's battery energy
and credit balance as fluids, with the rate of increase
or decrease of the fluid modulated by the network call
occupancy. This results in a network of stochastic
fluid models, which we analyse using a reduced-load
approach. We study the resources required to ensure the
network can maintain itself without having to drop
calls and investigate the design of a credit incentive
mechanism to discourage uncooperative behaviour in the
sharing of resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Stanford:2012:NPP,
author = "David Stanford and Peter G. Taylor and Ilze Ziedins",
title = "A new paradigm for priority patient selection
(abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--29",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185419",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The central purpose of this work is to bridge the gap
between two aspects of health care systems: (1) Key
Performance Indicators (KPIs) for delay in access to
care for patient classes, with differing levels of
acuity or urgency, specify the fraction of patients
needing to be seen by some key time point. (2) Patient
classes present themselves for care, and consume health
care resources, in a fashion that is totally
independent of the KPIs. Rather, they present in a
manner determined by the prevalence of the medical
condition, at a rate that may vary over time. Treatment
times will likewise be determined by medical need and
current practice. There is no reason to expect the
resulting system performance will adhere to the
specified KPIs. The present work presents a new
paradigm for priority assignment that enables one to
fine-tune the system in order to achieve the delay
targets, assuming sufficient capacity exists for at
least one such arrangement.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Toyoizumi:2012:ADS,
author = "Hiroshi Toyoizumi and Jeremy Field",
title = "Analysis of the dynamics of social queues by
quasi-birth-and-death processes (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "29--30",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185420",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A wide variety of animals are known to form simple
hierarchical groups called social queues, where
individuals inherit resources or social status in a
predictable order. Queues are often age-based, so that
a new individual joins the end of the queue on reaching
adulthood, and must wait for older individuals to die
in order to reach the front of the queue. While
waiting, an individual may work for her group, in the
process often risking her own survival and hence her
chance of inheritance. Eventually, she may survive to
reach the head of the queue and becomes the dominant of
the group. Queueing has been particularly well-studied
in hover wasps (Hymenoptera: Stenogastrinae). In hover
wasp social groups, only one female lays eggs, and
there is a strict, age-based queue to inherit the
reproductive position. While the dominant individual
(queen) concentrates on breeding, subordinate helpers
risk death by foraging outside the nest, but have a
slim chance of eventually inheriting dominance. Some
explanations for this altruistic behavior and for the
stability of social queues have been proposed and
analyzed [1, 2]. Since both the productivity of the
nest and the chance to inherit the dominant position
depend critically on group size, queueing dynamics are
crucial for understanding social queues, but detailed
analysis is lacking. Here, using hover wasps as an
example, we demonstrate that the application of
Little's formula [3] and quasi-birth-and-death (QBD)
processes are useful for analyzing queueing dynamics
and the population demographics of social queues. Let
(L(t),M(t)) be the number of adults and brood (eggs,
larvae and pupae) in a nest at time t. We model the
vector (L(t),M(t)) as a QBD process starting from the
state (L(0),M(0)) = (1, 0) to analyze the nest history
of a social queue. The boundary state {L(t) = 0}, which
corresponds to the termination of the nest, is regarded
as the taboo state of this QBD process. Let Q be the
transition rate matrix of the taboo process. By
choosing different Q, we can set various conditions for
the social queue. By using standard technique such as
calculating Q ?1, we can estimate and compare the
productivity of the nest in wide variety of social
queues in different queueing and environmental
conditions. Our work leads to better understanding of
how environmental conditions and strategic
decision-making by individuals interact to produce the
observed group dynamics; and in turn, how group
dynamics affects individual decision-making.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2012:IDD,
author = "B. {Van Houdt} and J. F. P{\'e}rez",
title = "The impact of dampening demand variability in a
production\slash inventory system with multiple
retailers (abstract only)",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "30--30",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185421",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We study a supply chain consisting of a single
manufacturer and two retailers. The manufacturer
produces goods on a make-to-order basis, while both
retailers maintain an inventory and use a periodic
replenishment rule. As opposed to the traditional $ (r,
S) $ policy, where a retailer at the end of each period
orders the demand seen during the previous period, we
assume that the retailers dampen their demand
variability by smoothing the order size. More
specifically, the order placed at the end of a period
is equal to $ \beta $ times the demand seen during the
last period plus $ (1 - \beta) $ times the previous
order size, with $ \beta \in (0, 1] $ the smoothing
parameter. We develop a GI/M/1-type Markov chain with
only two nonzero blocks $ A_0 $ and $ A_d $ to analyze
this supply chain. The dimension of these blocks
prohibits us from computing its rate matrix R in order
to obtain the steady state probabilities. Instead we
rely on fast numerical methods that exploit the
structure of the matrices $ A_0 $ and $ A_d $, i.e.,
the power method, the Gauss--Seidel iteration and
GMRES, to approximate the steady state probabilities.
Finally, we provide various numerical examples that
indicate that the smoothing parameters can be set in
such a manner that all the involved parties benefit
from smoothing. We consider both homogeneous and
heterogeneous settings for the smoothing parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bean:2012:AQR,
author = "Nigel G. Bean and Bo Friis Nielsen",
title = "Analysis of queues with rational arrival process
components: a general approach",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "31--31",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185422",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bean:2012:SFM,
author = "Nigel G. Bean and Ma{\l}gorzata M. O'Reilly",
title = "A stochastic fluid model driven by an
uncountable-state process, which is a stochastic fluid
model itself: the stochastic fluid-fluid model",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "32--32",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185423",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bini:2012:CCR,
author = "Dario A. Bini and Paola Favati and Beatrice Meini",
title = "A compressed cyclic reduction for {QBDs} with low rank
upper and lower transitions",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "33--33",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185424",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Consider a quasi-birth-and-death (QBD) Markov chain
[6], having probability transition matrix where B$_i$,
A$_i$, $i = -1, 0, 1$, are $m \times m$ matrices. In the
numerical solution of QBD Markov chains a crucial step
is the efficient computation of the minimal nonnegative
solution R of the quadratic matrix equation $X = X^2
A_{-1} + X A_0 + A_1$. (1) To this purpose, many
numerical methods, with different properties, have been
designed in the last years (see for instance [1, 2, 3,
4]). However, many of these numerical methods are
defined for general block coefficients A$_{-1}$, A$_0$
and A$_1$, and do not exploit the possible structure of
these blocks. Recently, some attention has been
addressed to the case where A$_{-1}$ has only few
non-null columns, or A$_1$ has only few non-null rows.
These properties are satisfied when the QBD has
restricted transitions to higher (or lower) levels. In
particular, in [7] the authors exploit these properties
of the matrix A$_{-1}$, or A$_1$, to formulate the QBD
in terms of an M/G/1 type Markov chain, where the block
matrices have size smaller than m; in particular, when
both A$_{-1}$ and A$_1$ have the desired property, the
latter M/G/1 type Markov chain reduces to a QBD. In [5]
the structure of A$_{-1}$ is used in order to reduce
the computational cost of some algorithms for computing
R. Here we assume that both the matrices A$_{-1}$ and
A$_1$ have small rank with respect to their size m. In
particular, if A$_{-1}$ and A$_1$ have only few
non-null columns and rows, respectively, they have
small rank. We show that, under this assumption, the
matrix R can be computed by using the cyclic reduction
algorithm, where the matrices A$^{(k)}_i$, $i = -1, 0,
1$, generated at the $k$th step of the algorithm, can
be represented by small rank matrices. In particular,
if r$_{-1}$ is the rank of A$_{-1}$, and if r$_1$ is
the rank of A$_1$, then each step of cyclic reduction
can be performed by means of O((r$_{-1}$ +
r$_1$)$^3$) arithmetic operations. This cost estimate
must be compared with the cost of O(m$^3$) arithmetic
operations, needed without exploiting the structure of
A$_{-1}$ and A$_1$. Therefore, if r$_{-1}$ and r$_1$
are much smaller than m, the advantage is evident. It
remains an open issue to understand how the structure
can be exploited in the case where only one between
A$_{-1}$ and A$_1$ has low rank.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bladt:2012:OMG,
author = "Mogens Bladt and Bo Friis Nielsen",
title = "An overview of multivariate gamma distributions as
seen from a (multivariate) matrix exponential
perspective",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "34--34",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185425",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Numerous definitions of multivariate exponential and
gamma distributions can be retrieved from the
literature [4]. These distributions belong to the class
of Multivariate Matrix-Exponential Distributions
(MVME) whenever their joint Laplace transform is a
rational function. The majority of these distributions
further belongs to an important subclass of MVME
distributions [5, 1] where the multivariate random
vector can be interpreted as a number of simultaneously
collected rewards during sojourns in the states of a
Markov chain with one absorbing state, the rest of the
states being transient. We present the corresponding
representations for all such distributions. In this way
we obtain a unification of the variety of existing
distributions as well as a deeper understanding of
their probabilistic nature and a clarification of their
similarities and differences. In particular one may
easily generalize or combine any of the known
distributions by modifying the generators adequately.
Also, it is straightforward to simulate from this
class. Thus, by identifying distributions as belonging
to this subclass it becomes apparent how to simulate
from most previously discussed distributions with
rational Laplace transform. In a longer perspective
stochastic and statistical analysis for MVME will in
particular apply to any of the previously defined
distributions. Multivariate gamma distributions have
been used in a variety of fields like hydrology, [11],
[10], [6], space (wind modeling) [9] reliability [3],
[7], traffic modeling [8], and, finance [2]. It is our
hope that the paper will assist practitioners in
formulating and analyzing models in a much more
transparent and easily accessible way.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blanchet:2012:RES,
author = "Jose Blanchet and Jing Dong",
title = "Rare-event simulation for multi-server queues in the
{Halfin--Whitt} regime",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "35--35",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185426",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2012:PFA,
author = "Giuliano Casale and Peter G. Harrison and Maria Grazia
Vigliotti",
title = "Product-form approximation of queueing networks with
phase-type service",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "36--36",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185427",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dai:2012:NAD,
author = "J. G. Dai and Shuangchi He",
title = "Numerical analysis for diffusion models of {GI/Ph/n
$+$ GI} queues",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "37--37",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185428",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denardo:2012:SFM,
author = "Eric V. Denardo and Eugene A. Feinberg and Uriel G.
Rothblum",
title = "Splitting in a finite {Markov} decision problem",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "38--38",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185429",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Denardo:2012:MAB,
author = "Eric V. Denardo and Eugene A. Feinberg and Uriel G.
Rothblum",
title = "The multi-armed bandit, with constraints",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "39--39",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185430",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The colorfully-named and much-studied multi-armed
bandit is the following Markov decision problem: At
epochs 1, 2, \ldots{}, a decision maker observes the
current state of each of several Markov chains with
rewards (bandits) and plays one of them. The Markov
chains that are not played remain in their current
states. The Markov chain that is played evolves for one
transition according to its transition probabilities,
earning an immediate reward (possibly negative) that
can depend upon its current state and on the state to
which transition occurs. Henceforth, to distinguish the
states of the individual Markov chains from those of
the Markov decision problem, the latter are called
multi-states. Each multi-state prescribes a state for
each of the Markov chains. This version of the
multi-armed bandit problem was originally solved by
John Gittins. It has a large range of operations
research applications including applications to
resource allocation, scheduling, project management,
and search. A key result for the multi-armed bandit is
that attention can be restricted to a simple class of
decision procedures. A label is assigned to each state
of each bandit such that no two states have the same
label, even if they are in different bandits. A
priority rule is a policy that, given each multistate,
plays the Markov chain whose current state has the
lowest label. The literature includes several different
proofs of the optimality of a priority rule. Nearly all
of these proofs rest on a family of optimal stopping
times, one for each state of each bandit. A different
approach is taken here. Pair-wise comparison, rather
than optimal stopping, is used to demonstrate the
optimality of a priority rule. This is accomplished for
models having linear and exponential utility functions.
Elementary row operations are used to identify an
optimal priority rule and to compute its expected
utility for a given starting state. Our analysis covers
the cases of linear and exponential utilities. In the
case of a linear utility function, the model is
generalized to include constraints that link the
bandits. With C constraints, an optimal policy is shown
to take the form of an initial randomization over C + 1
priority rules, and column generation is proposed as a
solution method. The proposed computational methods are
based on several matrix algorithms. First, an
algorithm, called the Triangularizer, transforms the
one-step rewards and transition probability matrixes
for individual bandits by applying elementary row
operations. The transformed matrixes, called finalized,
are triangle: all their elements on diagonals and below
diagonals are equal to zero. For a given index policy,
running the transformed bandits is equivalent to
running the original bandits. Second, the transition
probabilities and one-step rewards of the transformed
bandits are used to compute the performance
characteristics of index policies in polynomial times.
These computations are used by the column generation
algorithm for multi-armed bandits with constraints.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dendievel:2012:SDP,
author = "S. Dendievel and G. Latouche and M-A. Remiche",
title = "Stationary distribution of a perturbed {QBD} process",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "40--40",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185431",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider Quasi-Birth-and-Death processes and our
purpose is to assess the impact of small variation of
the initial parameters.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Govorun:2012:PRP,
author = "Maria Govorun and Guy Latouche and Marie-Ange
Remiche",
title = "Profits and risks of pension plans",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "41--41",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185432",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kobayashi:2012:RTA,
author = "Masahiro Kobayashi and Masakiyo Miyazawa",
title = "Revisit to the tail asymptotics of the double {QBD}
process by the analytic function method",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "42--42",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185433",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2012:FMM,
author = "Yunan Liu and Ward Whitt",
title = "A fluid model for many-server queues with time-varying
arrivals and phase-type service distribution",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "43--43",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185434",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Margolius:2012:NSM,
author = "Barbara Margolius",
title = "Numerical study of {Markovian} arrival processes
{(MAP)} with time-varying periodic arrival rates",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "44--44",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185435",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the arrival rate of a Markovian Arrival
Process with time-varying periodic transition rates.
The arrival rate can vary widely for a MAP with fixed
average transition rates by selecting appropriate
transition rate functions over the period.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{OReilly:2012:SDF,
author = "Ma{\l}gorzata M. O'Reilly and Nigel G. Bean",
title = "Stochastic 2-dimensional fluid model",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "45--45",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185436",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bini:2012:SQM,
author = "D. Bini and B. Meini and S. Steff{\'e} and J. F.
P{\'e}rez and B. {Van Houdt}",
title = "{SMCSolver} and {Q-MAM}: tools for matrix-analytic
methods",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "46--46",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185437",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Matrix-analytic methods have advanced considerably
since the pioneering work of Marcel Neuts [6, 5] on
Quasi-Birth-Death (QBD), GI/M/1- and M/G/1- type Markov
chains (MCs). Especially the algorithms involved to
(iteratively) solve these structured Markov chains have
matured a lot, which has resulted in more efficient,
but also more complex algorithms [4, 1]. While the
first algorithms were straightforward to implement---as
they were based on simple functional iterations---more
advanced algorithms/features like cyclic-reduction, the
Newton iteration or the shift technique (to accelerate
convergence), require more effort; in particular for
GI/M/1- and M/G/1-type Markov chains. This has
motivated us to develop the Structured Markov Chain
Solver (SMCSolver) tool [2], which implements a large
number of basic and more advanced algorithms for
solving QBD, GI/M/1- and M/G/1-type MCs1 (as well as
the more general Non-Skip-Free M/G/1-type MCs). The
MATLAB version of the tool consists of a collection of
MATLAB functions, while the Fortran version is
accompanied by a graphical user-interface (GUI). Apart
from making these more advanced algorithms accessible
to non-specialists, the tool is also useful as a
platform for the development and study of new
algorithms and acceleration techniques. Since its
initial release in 2006, various extensions have been
made. In [3] different transformation techniques and
shift strategies are incorporated in order to speed up
and optimize the algorithms, while even more recently
an efficient Newton iteration for GI/M/1- and
M/G/1-type Markov chains was included [8].
Matrix-analytic methods have also been very effective
in the analysis of many queueing systems in both
discrete- and continuous-time. The Q-MAM tool [7] is a
collection of MATLAB functions that allows one to
compute the queue length, waiting time and delay
distribution of various queueing systems of infinite
size. It includes amongst others implementations of the
PH/PH/1, MAP/MAP/1, MAP/M/c, MAP/D/c, RAP/RAP/1,
MMAP[K]/PH[K]/1, MMAP[K]/SM[K]/1, SM[K]/PH[K]/1 (many
in both discrete- and continuous-time), where
state-of-the-art solution techniques are used to solve
these models efficiently. The Matlab version of the
SMCSolver and Q-MAM tool is available at
http://win.ua.ac.be/\%7Evanhoudt/ while the Fortran 90
version of the SMCSolver tool with the GUI can be
downloaded from http://bezout.dm.unipi.it/SMCSolver.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casale:2012:KTF,
author = "Giuliano Casale and Evgenia Smirni",
title = "{KPC-toolbox}: fitting {Markovian} arrival processes
and phase-type distributions with {MATLAB}",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "47--47",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185438",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cote:2012:JPS,
author = "Marco Cote and Germ{\'a}n Ria{\~n}o and Raha
Akhavan-Tabatabaei and Juan Fernando P{\'e}rez and
Andr{\'e}s Sarmiento and Julio G{\'o}ez",
title = "{jMarkov} package: a stochastic modeling tool",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "48--48",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185439",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When analyzing real life stochastic systems in most
cases is easier, cheaper and more effective to use
analytical models rather than studying the physical
system or a simulation model of it. The stochastic
modeling is a powerful tool that helps the analysis and
optimization of stochastic systems. However the use of
stochastic modeling is not widely spread in today's
industries and among practitioners. This lack of
acceptance is caused by two main reasons the first
being the curse of dimensionality, which is defined by
the number of states required to describe a system.
This number grows exponentially as the size of the
system increases. The second reason is the lack of
user-friendly and efficient software packages that
allow the modeling of the problem without involving the
user with the implementation of the solution algorithms
to solve it. The curse of dimensionality is a constant
problem that has been addressed by different approaches
through time, but it is not intended within the scope
of our work; our focus is on the latter issue. We
propose a generic solver that enables the user to focus
on modeling without getting involved in the complexity
required by the solution methods. We design an object
oriented framework for stochastic modeling with four
components namely, jMarkov which models Markov Chains,
jQBD which models Quasi Birth and Death Processes,
jPhase which models Phase Types Distributions and jMDP
which models Markov Decision Processes. We concentrate
all our effort on creating a software that allows the
user to model any kind of system like a Markov Chain,
QBD or MDP with fairly basic knowledge of programming.
To this end we separate the modeling part from the
solution algorithms; therefore the user only needs to
mathematically model the problem and the software will
do the rest. However, we leave the package with the
possibility that experienced users can code their own
solution algorithms; this is done since the package
only contains the most common algorithms found in the
literature. The software does not use external plain
files like '.txt' or '.dat' written with specific
commands, but rather it is based on OOP (Object
Oriented Programming). The main advantages of it
include implementation in Java framework, which allows
the computational representation of the model to be
very similar to its mathematical representation such
that it would become natural to pass from one to
another. Also the program possesses the usual
characteristics of Java such as the use of inheritance
and abstraction. Finally, Java is a high level
computational language so the user doesn't need to be
concerned about technical problems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Casolari:2012:SRC,
author = "Sara Casolari and Michele Colajanni and Stefania
Tosi",
title = "Selective resource characterization for evaluation of
system dynamics",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "51--60",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185441",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Management decisions to achieve peak performance
operations, scalability and availability in distributed
systems require a continuous statistical
characterization of data sets coming from server and
network monitors. Due to the increasing sizes of data
centers and their continuous dynamic changes, the
traditional approaches that work on all data sets in a
centralized way are impractical. We propose a strategy
for data processing that is able to limit the analysis
of the large sets of collected measures to a smaller
subset of significant information for a twofold
purpose: to classify the collected data sets in few
classes characterized by similar statistical behaviors,
to evaluate the dynamics of the overall system and its
most relevant changes. The proposed strategy works at
the level of server resources and of significant
aggregation of servers of the overall distributed
system. Several experimental results demonstrate the
feasibility of the proposed strategy that is validated
in real contexts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aceto:2012:RUE,
author = "Giuseppe Aceto and Antonio Pescap{\`e}",
title = "On the recent use of email through traffic and network
analysis: the impact of {OSNs}, new trends, and other
communication platforms",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "61--70",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185442",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Since late 1971 --- when Ray Tomlinson invented
Internet-based email and sent the first message on
ARPANET --- email technology has evolved a lot, and
nowadays it is one of the most widely used applications
on the Internet. Despite this primacy, during the last
years other ways to exchange messages have been used by
Internet users (e.g. Instant Messaging, Social
Networks, microblogs, etc.). In this paper we propose a
methodology based on heterogeneous data sources to
analyze the amount of traffic associated with emails in
order to gain knowledge on the use of email by Internet
users in the last years. We consider real traffic
traces that are well known to the research community as
well as locally captured, and discuss them in the light
of other related phenomena: social networks adoption,
online advertising trends, abusive email spreads,
etc. We discuss the trend of email traffic in the last
10 years and we provide explanations related to the
impact, on the email usage, of the utilization of other
communication platforms. This work represents a first
step towards a framework in which to analyze the trend
of the email traffic and the associated phenomena as
well as the understanding of the upcoming novel
communications behavior of Internet users.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Distefano:2012:DAB,
author = "Salvatore Distefano and Antonio Puliafito and Kishor
S. Trivedi",
title = "Dynamic aspects and behaviors of complex systems in
performance and reliability assessment",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "71--78",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185443",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Reliability and performance evaluation are important,
often mandatory, steps in designing and analyzing
(critical) systems. In such cases, accurate models are
required to adequately take into account interference
or dependent behaviors affecting the system, its parts
and the external environment, especially if the system
experiences high complexity. The techniques and tools
to adopt in the evaluation have to adequately fit the
problem considered. The main goal of this paper is to
identify the dynamic-dependent aspects that can affect
the reliability and performance of a system. Starting
from the concept of dependence at the basis of system
decomposition, an analytic framework and some of the
most important dynamic-dependent aspects and behaviors
are characterized in terms of both dynamic reliability
and performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mahmud:2012:CST,
author = "Nidhal Mahmud and Martin Walker and Yiannis
Papadopoulos",
title = "Compositional synthesis of temporal fault trees from
state machines",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "79--88",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185444",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Dependability analysis of a dynamic system which is
embedded with several complex interrelated components
raises two main problems. First, it is difficult to
represent in a single coherent and complete picture how
the system and its constituent parts behave in
conditions of failure. Second, the analysis can be
unmanageable due to a considerable number of failure
events, which increases with the number of components
involved. To remedy this problem, in this paper we
outline an analysis approach that converts failure
behavioural models (state machines) to temporal fault
trees (TFTs), which can then be analysed using Pandora
--- a recent technique for introducing temporal logic
to fault trees. The approach is compositional and
potentially more scalable, as it relies on the
synthesis of large system TFTs from smaller component
TFTs. We show, by using a Generic Triple Redundant
(GTR) system, how the approach enables a more accurate
and full analysis of an increasingly complex system.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Abundo:2012:ACP,
author = "Marco Abundo and Valeria Cardellini and Francesco {Lo
Presti}",
title = "Admission control policies for a multi-class
{QoS}-aware service oriented architecture",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "89--98",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185445",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the service computing paradigm, a service broker
can build new applications by composing
network-accessible services offered by loosely coupled
independent providers. In this paper, we address the
problem of providing a service broker, which offers to
prospective users a composite service with a range of
different Quality of Service (QoS) classes, with a
forward-looking admission control policy based on
Markov Decision Processes (MDP). This mechanism allows
the broker to decide whether to accept or reject a new
potential user in such a way to maximize its gain while
guaranteeing non-functional QoS requirements to its
already admitted users. We model the broker using a
continuous-time MDP and consider various techniques
suitable to solve both infinite-horizon and
finite-horizon MDPs. To assess the effectiveness of the
MDP-based admission control for the service broker, we
present simulation results where we compare the optimal
decisions obtained by the analytical solution of the
MDP with other admission control policies. To deal with
large problem instances, we also propose a heuristic
policy for the MDP solution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Persona:2012:HQM,
 author =       "Vittoria de Nitto Person{\`e}",
title = "Heuristics for {QoS} maintenance: adaptive policies in
differentiated services wireless networks",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "99--107",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185446",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The fluctuation in resource availability demands for
adaptive behavior in wireless environments. The problem
is exacerbated by the differentiated services with
different quality demands. We present the MATS+ scheme,
an adaptive bandwidth allocation and admission control
algorithm for mobile integrated services networks. This
extends the recently proposed MATS scheme [11] to
include non-real time classes and a per-class
utilization control. We define an analytical model and
performance metrics to evaluate the proposed scheme.
The efficiency and flexibility of the analytical model
                 allows conducting several experiments in a real world
scenario by changing different system parameters. From
the obtained results we define an interesting
heuristics to initialize the scheme guaranteeing QoS
requirements and to maintain the QoS while adapting to
environment changing conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anceaume:2012:PEL,
author = "Emmanuelle Anceaume and Romaric Ludinard and Bruno
Sericola",
title = "Performance evaluation of large-scale dynamic
systems",
journal = j-SIGMETRICS,
volume = "39",
number = "4",
pages = "108--117",
month = apr,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2185395.2185447",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:38 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we present an in-depth study of the
dynamicity and robustness properties of large-scale
distributed systems, and in particular of peer-to-peer
systems. When designing such systems, two major issues
need to be faced. First, population of these systems
evolves continuously (nodes can join and leave the
system as often as they wish without any central
authority in charge of their control), and second,
these systems being open, one needs to defend against
the presence of malicious nodes that try to subvert the
system. Given robust operations and adversarial
strategies, we propose an analytical model of the local
behavior of clusters, based on Markov chains. This
local model provides an evaluation of the impact of
malicious behaviors on the correctness of the system.
Moreover, this local model is used to evaluate
analytically the performance of the global system,
allowing to characterize the global behavior of the
system with respect to its dynamics and to the presence
of malicious nodes and then to validate our approach.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Patel:2012:PIF,
author = "Naresh M. Patel",
title = "Performance implications of flash and storage class
memories",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "1--2",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254758",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The storage industry has seen incredible growth in
data storage needs by both consumers and enterprises.
Long-term technology trends mean that the data deluge
will continue well into the future. These trends
include the big-data trend (driven by data mining
analytics, high-bandwidth needs, and large content
repositories), server virtualization, cloud storage,
and Flash. We will cover how Flash and storage class
memories (SCM) interact with some of these major trends
from a performance perspective.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2012:HPC,
author = "Zhen Liu",
title = "High-performance computing in mobile services",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "3--4",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254759",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "With the ever increasing popularity of smart phones,
mobile services have been evolving rapidly to allow
users to enjoy localized and personalized experiences.
Users can discover local information and keep connected
with family and friends on the go, and ultimately to
experience the convergence of cyber space and physical
world where digital technologies are interwoven into
the day-to-day life. A pivotal component of such a
cyber-physical convergence is the contextual
intelligence. The extraction and dissemination of
contextual information around users is the key for the
cyber capabilities to be applied to physical activities
and for the cyber world to better reflect the physical
reality. In this talk, we shall address some issues
arising from context-based mobile services. In
particular, we discuss how mobility impacts contextual
relevancy and personalization in mobile services. The
relevancy and timeliness of contextual information not
only are essential for these services to deliver great
user experiences, but also put significant computation
pressure on service infrastructure that processes
continuous data streams in real time and disseminate
relevant data to a large amount of mobile users. This
talk will explore the challenges and opportunities for
high-performance computing in mobile services. Based on
key findings from large-scale mobile measurement data,
the talk will analyze the tradeoff of different
computing architectures, present case studies of
scalable system design and implementation for
personalized mobile services, and conclude with open
challenges for the broad research community in
performance measurement and modeling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2012:DTM,
author = "Jian Tan and Xiaoqiao Meng and Li Zhang",
title = "Delay tails in {MapReduce} scheduling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "5--16",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254761",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "MapReduce/Hadoop production clusters exhibit
heavy-tailed characteristics for job processing times.
These phenomena are resultant of the workload features
and the adopted scheduling algorithms. Analytically
understanding the delays under different schedulers for
MapReduce can facilitate the design and deployment of
large Hadoop clusters. The map and reduce tasks of a
MapReduce job have fundamental difference and tight
dependence between them, complicating the analysis.
This also leads to an interesting starvation problem
with the widely used Fair Scheduler due to its greedy
approach to launching reduce tasks. To address this
issue, we design and implement Coupling Scheduler,
which gradually launches reduce tasks depending on map
task progresses. Real experiments demonstrate
improvements to job response times by up to an order of
magnitude. Based on extensive measurements and source
code investigations, we propose analytical models for
the default FIFO and Fair Scheduler as well as our
implemented Coupling Scheduler. For a class of
heavy-tailed map service time distributions, i.e.,
regularly varying of index -a, we derive the
distribution tail of the job processing delay under the
three schedulers, respectively. The default FIFO
Scheduler causes the delay to be regularly varying of
index -a+1. Interestingly, we discover a criticality
phenomenon for Fair Scheduler, the delay under which
can change from regularly varying of index -a to -a+1,
depending on the maximum number of reduce tasks of a
job. Other more subtle behaviors also exist. In
contrast, the delay distribution tail under Coupling
Scheduler can be one order lower than Fair Scheduler
under some conditions, implying a better performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2012:OQS,
author = "Devavrat Shah and Neil Walton and Yuan Zhong",
title = "Optimal queue-size scaling in switched networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "17--28",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254762",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a switched (queueing) network in which
there are constraints on which queues may be served
simultaneously; such networks have been used to
effectively model input-queued switches and wireless
networks. The scheduling policy for such a network
specifies which queues to serve at any point in time,
based on the current state or past history of the
system. In the main result of this paper, we provide a
new class of online scheduling policies that achieve
optimal average queue-size scaling for a class of
switched networks including input-queued switches. In
particular, it establishes the validity of a conjecture
about optimal queue-size scaling for input-queued
switches.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hyytia:2012:MSH,
author = "Esa Hyyti{\"a} and Samuli Aalto and Aleksi Penttinen",
title = "Minimizing slowdown in heterogeneous size-aware
dispatching systems",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "29--40",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254763",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider a system of parallel queues where tasks
are assigned (dispatched) to one of the available
servers upon arrival. The dispatching decision is based
on the full state information, i.e., on the sizes of
the new and existing jobs. We are interested in
minimizing the so-called mean slowdown criterion
corresponding to the mean of the sojourn time divided
by the processing time. Assuming no new jobs arrive,
the shortest-processing-time-product (SPTP) schedule is
known to minimize the slowdown of the existing jobs.
The main contribution of this paper is three-fold: (1)
To show the optimality of SPTP with respect to slowdown
in a single server queue under Poisson arrivals; (2) to
derive the so-called size-aware value functions for
M/G/1-FIFO/LIFO/SPTP with general holding costs of
which the slowdown criterion is a special case; and (3)
to utilize the value functions to derive efficient
dispatching policies so as to minimize the mean
slowdown in a heterogeneous server system. The derived
policies offer a significantly better performance than
e.g., the size-aware-task-assignment with equal load
(SITA-E) and least-work-left (LWL) policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Leconte:2012:BGS,
author = "Mathieu Leconte and Marc Lelarge and Laurent
Massouli{\'e}",
title = "Bipartite graph structures for efficient balancing of
heterogeneous loads",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "41--52",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254764",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper considers large scale distributed content
service platforms, such as peer-to-peer video-on-demand
systems. Such systems feature two basic resources,
namely storage and bandwidth. Their efficiency
critically depends on two factors: (i) content
replication within servers, and (ii) how incoming
service requests are matched to servers holding
requested content. To inform the corresponding design
choices, we make the following contributions. We first
show that, for underloaded systems, so-called
proportional content placement with a simple greedy
strategy for matching requests to servers ensures full
system efficiency provided storage size grows
logarithmically with the system size. However, for
constant storage size, this strategy undergoes a phase
transition with severe loss of efficiency as system
load approaches criticality. To better understand the
role of the matching strategy in this performance
degradation, we characterize the asymptotic system
efficiency under an optimal matching policy. Our
                 analysis shows that --- in contrast to greedy matching ---
optimal matching incurs an inefficiency that is
exponentially small in the server storage size, even at
critical system loads. It further allows a
characterization of content replication policies that
minimize the inefficiency. These optimal policies,
which differ markedly from proportional placement, have
a simple structure which makes them implementable in
practice. On the methodological side, our analysis of
matching performance uses the theory of local weak
limits of random graphs, and highlights a novel
characterization of matching numbers in bipartite
graphs, which may both be of independent interest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Atikoglu:2012:WAL,
author = "Berk Atikoglu and Yuehai Xu and Eitan Frachtenberg and
Song Jiang and Mike Paleczny",
title = "Workload analysis of a large-scale key-value store",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "53--64",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254766",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Key-value stores are a vital component in many
scale-out enterprises, including social networks,
online retail, and risk analysis. Accordingly, they are
receiving increased attention from the research
community in an effort to improve their performance,
scalability, reliability, cost, and power consumption.
To be effective, such efforts require a detailed
understanding of realistic key-value workloads. And yet
little is known about these workloads outside of the
companies that operate them. This paper aims to address
this gap. To this end, we have collected detailed
traces from Facebook's Memcached deployment, arguably
the world's largest. The traces capture over 284
billion requests from five different Memcached use
cases over several days. We analyze the workloads from
multiple angles, including: request composition, size,
and rate; cache efficacy; temporal patterns; and
application use cases. We also propose a simple model
of the most representative trace to enable the
generation of more realistic synthetic workloads by the
community. Our analysis details many characteristics of
the caching workload. It also reveals a number of
surprises: a GET/SET ratio of 30:1 that is higher than
assumed in the literature; some applications of
Memcached behave more like persistent storage than a
cache; strong locality metrics, such as keys accessed
many millions of times a day, do not always suffice for
a high hit rate; and there is still room for efficiency
and hit rate improvements in Memcached's
implementation. Toward the last point, we make several
suggestions that address the exposed deficiencies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shafiq:2012:FLC,
author = "Muhammad Zubair Shafiq and Lusheng Ji and Alex X. Liu
and Jeffrey Pang and Jia Wang",
title = "A first look at cellular machine-to-machine traffic:
large scale measurement and characterization",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "65--76",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254767",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cellular network based Machine-to-Machine (M2M)
communication is fast becoming a market-changing force
for a wide spectrum of businesses and applications such
as telematics, smart metering, point-of-sale terminals,
and home security and automation systems. In this
paper, we aim to answer the following important
question: Does traffic generated by M2M devices impose
new requirements and challenges for cellular network
design and management? To answer this question, we take
a first look at the characteristics of M2M traffic and
compare it with traditional smartphone traffic. We have
conducted our measurement analysis using a week-long
traffic trace collected from a tier-1 cellular network
in the United States. We characterize M2M traffic from
a wide range of perspectives, including temporal
dynamics, device mobility, application usage, and
network performance. Our experimental results show that
M2M traffic exhibits significantly different patterns
than smartphone traffic in multiple aspects. For
instance, M2M devices have a much larger ratio of
uplink to downlink traffic volume, their traffic
typically exhibits different diurnal patterns, they are
more likely to generate synchronized traffic resulting
in bursty aggregate traffic volumes, and are less
mobile compared to smartphones. On the other hand, we
also find that M2M devices are generally competing with
smartphones for network resources in co-located
geographical regions. These and other findings suggest
that better protocol design, more careful spectrum
allocation, and modified pricing schemes may be needed
to accommodate the rise of M2M devices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Han:2012:BPB,
author = "Jinyoung Han and Seungbae Kim and Taejoong Chung and
Ted Taekyoung Kwon and Hyun-chul Kim and Yanghee Choi",
title = "Bundling practice in {BitTorrent}: what, how, and
why",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "77--88",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254768",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We conduct comprehensive measurements on the current
practice of content bundling to understand the
structural patterns of torrents and the participant
behaviors of swarms on one of the largest BitTorrent
portals: The Pirate Bay. From the datasets of the 120K
torrents and 14.8M peers, we investigate what
constitutes torrents and how users participate in
swarms from the perspective of bundling, across
different content categories: Movie, TV, Porn, Music,
Application, Game and E-book. In particular, we focus
on: (1) how prevalent content bundling is, (2) how and
what files are bundled into torrents, (3) what
motivates publishers to bundle files, and (4) how peers
access the bundled files. We find that over 72\% of
BitTorrent torrents contain multiple files, which
indicates that bundling is widely used for file
sharing. We reveal that profit-driven BitTorrent
publishers who promote their own web sites for
financial gains like advertising tend to prefer to use
the bundling. We also observe that most files (94\%) in
a bundle torrent are selected by users and the bundle
torrents are more popular than the single (or
non-bundle) ones on average. Overall, there are notable
differences in the structural patterns of torrents and
swarm characteristics (i) across different content
categories and (ii) between single and bundle
torrents.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gan:2012:EEC,
author = "Lingwen Gan and Anwar Walid and Steven Low",
title = "Energy-efficient congestion control",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "89--100",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254770",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Various link bandwidth adjustment mechanisms are being
developed to save network energy. However, their
interaction with congestion control can significantly
reduce network throughput, and is not well understood.
We firstly put forward a framework to study this
interaction, and then propose an easily implementable
dynamic bandwidth adjustment (DBA) mechanism for the
links. In DBA, each link updates its bandwidth
according to an integral control law to match its
average buffer size with a target buffer size. We prove
that DBA reduces link bandwidth without sacrificing
throughput---DBA only turns off excess bandwidth---in
the presence of congestion control. Preliminary ns2
simulations confirm this result.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Jelenkovic:2012:UAD,
author = "Predrag R. Jelenkovic and Evangelia D. Skiani",
title = "Uniform approximation of the distribution for the
number of retransmissions of bounded documents",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "101--112",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254771",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Retransmission-based failure recovery represents a
primary approach in existing communication networks, on
all protocol layers, that guarantees data delivery in
the presence of channel failures. Contrary to the
traditional belief that the number of retransmissions
is geometrically distributed, a new phenomenon was
discovered recently, which shows that retransmissions
can cause long (-tailed) delays and instabilities even
if all traffic and network characteristics are
light-tailed, e.g., exponential or Gaussian. Since the
preceding finding holds under the assumption that data
sizes have infinite support, in this paper we
investigate the practically important case of bounded
                 data units 0 {$<$}= L$_b$ {$<$}= b. To this end, we
provide an explicit and uniform characterization of the
entire body of the retransmission distribution Pr[N$_b$
{$>$} n] in both n and b. This rigorous approximation
clearly demonstrates the previously observed transition
from power law distributions in the main body to
exponential tails. The accuracy of our approximation is
validated with a number of simulation experiments.
Furthermore, the results highlight the importance of
wisely determining the size of data units in order to
accommodate the performance needs in
retransmission-based systems. From a broader
perspective, this study applies to any other system,
e.g., computing, where restart mechanisms are employed
after a job processing failure.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{VanHoudt:2012:FLA,
author = "Benny {Van Houdt} and Luca Bortolussi",
title = "Fluid limit of an asynchronous optical packet switch
with shared per link full range wavelength conversion",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "113--124",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254772",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an asynchronous all optical packet switch
(OPS) where each link consists of $N$ wavelength
channels and a pool of $ C \leq N$ full range tunable
wavelength converters. Under the assumption of Poisson
                 arrivals with rate $ \lambda $ (per wavelength channel) and
exponential packet lengths, we determine a simple
closed-form expression for the limit of the loss
probabilities $ P_{\rm loss}(N)$ as $N$ tends to
infinity (while the load and conversion ratio $ \sigma
= C / N$ remains fixed). More specifically, for $
\sigma \leq \lambda^2$ the loss probability tends to $
(\lambda^2 - \sigma) / \lambda (1 + \lambda)$, while
for $ \sigma > \lambda^2$ the loss tends to zero. We
also prove an insensitivity result when the exponential
packet lengths are replaced by certain classes of
phase-type distributions. A key feature of the
dynamical system (i.e., set of ODEs) that describes the
limit behavior of this OPS switch, is that its
right-hand side is discontinuous. To prove the
convergence, we therefore had to generalize some
existing result to the setting of piece-wise smooth
dynamical systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hua:2012:TOE,
author = "Nan Hua and Ashwin Lall and Baochun Li and Jun Xu",
title = "Towards optimal error-estimating codes through the
lens of {Fisher} information analysis",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "125--136",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254773",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Error estimating coding (EEC) has recently been
established as an important tool to estimate bit error
rates in the transmission of packets over wireless
links, with a number of potential applications in
wireless networks. In this paper, we present an
in-depth study of error estimating codes through the
lens of Fisher information analysis and find that the
original EEC estimator fails to exploit the information
contained in its code to the fullest extent. Motivated
by this discovery, we design a new estimator for the
original EEC algorithm, which significantly improves
the estimation accuracy, and is empirically very close
to the Cramer-Rao bound. Following this path, we
generalize the EEC algorithm to a new family of
                 algorithms called gEEC (generalized EEC). These
                 algorithms can be tuned to hold 25--35\% more
information with the same overhead, and hence deliver
even better estimation accuracy---close to optimal, as
evidenced by the Cramer-Rao bound. Our theoretical
analysis and assertions are supported by extensive
experimental evaluation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vulimiri:2012:HWC,
author = "Ashish Vulimiri and Gul A. Agha and Philip Brighten
Godfrey and Karthik Lakshminarayanan",
title = "How well can congestion pricing neutralize denial of
service attacks?",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "137--150",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254775",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Denial of service protection mechanisms usually
require classifying malicious traffic, which can be
difficult. Another approach is to price scarce
resources. However, while congestion pricing has been
suggested as a way to combat DoS attacks, it has not
been shown quantitatively how much damage a malicious
player could cause to the utility of benign
participants. In this paper, we quantify the protection
that congestion pricing affords against DoS attacks,
even for powerful attackers that can control their
packets' routes. Specifically, we model the limits on
the resources available to the attackers in three
different ways and, in each case, quantify the maximum
amount of damage they can cause as a function of their
resource bounds. In addition, we show that congestion
pricing is provably superior to fair queueing in attack
resilience.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Niu:2012:PCB,
author = "Di Niu and Chen Feng and Baochun Li",
title = "Pricing cloud bandwidth reservations under demand
uncertainty",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "151--162",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254776",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In a public cloud, bandwidth is traditionally priced
in a pay-as-you-go model. Reflecting the recent trend
of augmenting cloud computing with bandwidth
guarantees, we consider a novel model of cloud
bandwidth allocation and pricing when explicit
bandwidth reservation is enabled. We argue that a
tenant's utility depends not only on its bandwidth
usage, but more importantly on the portion of its
demand that is satisfied with a performance guarantee.
Our objective is to determine the optimal policy for
pricing cloud bandwidth reservations, in order to
maximize social welfare, i.e., the sum of the expected
profits that can be made by all tenants and the cloud
provider, even with the presence of demand uncertainty.
The problem turns out to be a large-scale network
optimization problem with a coupled objective function.
We propose two new distributed solutions --- based on
chaotic equation updates and cutting-plane methods ---
that prove to be more efficient than existing solutions
based on consistency pricing and subgradient methods.
In addition, we address the practical challenge of
forecasting demand statistics, required by our
optimization problem as input. We propose a factor
model for near-future demand prediction, and test it on
a real-world video workload dataset. All included, we
have designed a fully computerized trading environment
for cloud bandwidth reservations, which operates
effectively at a fine granularity of as small as ten
minutes in our trace-driven simulations.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{El-Sayed:2012:TMD,
author = "Nosayba El-Sayed and Ioan A. Stefanovici and George
Amvrosiadis and Andy A. Hwang and Bianca Schroeder",
title = "Temperature management in data centers: why some
(might) like it hot",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "163--174",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254778",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The energy consumed by data centers is starting to
make up a significant fraction of the world's energy
consumption and carbon emissions. A large fraction of
the consumed energy is spent on data center cooling,
which has motivated a large body of work on temperature
management in data centers. Interestingly, a key aspect
of temperature management has not been well understood:
controlling the setpoint temperature at which to run a
data center's cooling system. Most data centers set
their thermostat based on (conservative) suggestions by
manufacturers, as there is limited understanding of how
higher temperatures will affect the system. At the same
time, studies suggest that increasing the temperature
setpoint by just one degree could save 2--5\% of the
energy consumption. This paper provides a multi-faceted
study of temperature management in data centers. We use
a large collection of field data from different
production environments to study the impact of
temperature on hardware reliability, including the
reliability of the storage subsystem, the memory
subsystem and server reliability as a whole. We also
use an experimental testbed based on a thermal chamber
and a large array of benchmarks to study two other
potential issues with higher data center temperatures:
the effect on server performance and power. Based on
our findings, we make recommendations for temperature
management in data centers, that create the potential
for saving energy, while limiting negative effects on
system reliability and performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Liu:2012:RCA,
author = "Zhenhua Liu and Yuan Chen and Cullen Bash and Adam
Wierman and Daniel Gmach and Zhikui Wang and Manish
Marwah and Chris Hyser",
title = "Renewable and cooling aware workload management for
sustainable data centers",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "175--186",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254779",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recently, the demand for data center computing has
surged, increasing the total energy footprint of data
centers worldwide. Data centers typically comprise
three subsystems: IT equipment provides services to
customers; power infrastructure supports the IT and
cooling equipment; and the cooling infrastructure
removes heat generated by these subsystems. This work
presents a novel approach to model the energy flows in
a data center and optimize its operation.
Traditionally, supply-side constraints such as energy
or cooling availability were treated independently from
IT workload management. This work reduces electricity
cost and environmental impact using a holistic approach
that integrates renewable supply, dynamic pricing, and
cooling supply including chiller and outside air
cooling, with IT workload planning to improve the
overall sustainability of data center operations.
Specifically, we first predict renewable energy as well
as IT demand. Then we use these predictions to generate
an IT workload management plan that schedules IT
workload and allocates IT resources within a data
center according to time varying power supply and
cooling efficiency. We have implemented and evaluated
our approach using traces from real data centers and
production systems. The results demonstrate that our
approach can reduce both the recurring power costs and
the use of non-renewable energy by as much as 60\%
compared to existing techniques, while still meeting
the Service Level Agreements.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2012:ESD,
author = "Di Wang and Chuangang Ren and Anand Sivasubramaniam
and Bhuvan Urgaonkar and Hosam Fathy",
title = "Energy storage in datacenters: what, where, and how
much?",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "187--198",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254780",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy storage --- in the form of UPS units --- in a
datacenter has been primarily used to fail-over to
diesel generators upon power outages. There has been
recent interest in using these Energy Storage Devices
(ESDs) for demand-response (DR) to either shift peak
demand away from high tariff periods, or to shave
demand allowing aggressive under-provisioning of the
power infrastructure. All such prior work has only
considered a single/specific type of ESD (typically
re-chargeable lead-acid batteries), and has only
employed them at a single level of the power delivery
network. Continuing technological advances have
provided us a plethora of competitive ESD options
ranging from ultra-capacitors, to different kinds of
batteries, flywheels and even compressed air-based
storage. These ESDs offer very different trade-offs
between their power and energy costs, densities,
lifetimes, and energy efficiency, among other factors,
suggesting that employing hybrid combinations of these
may allow more effective DR than with a single
technology. Furthermore, ESDs can be placed at
different, and possibly multiple, levels of the power
delivery hierarchy with different associated
trade-offs. To our knowledge, no prior work has studied
the extensive design space involving multiple ESD
technology provisioning and placement options. This
paper intends to fill this critical void, by presenting
a theoretical framework for capturing important
characteristics of different ESD technologies, the
trade-offs of placing them at different levels of the
power hierarchy, and quantifying the resulting
cost-benefit trade-offs as a function of workload
properties.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shah:2012:RCU,
author = "Devavrat Shah and Tauhid Zaman",
title = "Rumor centrality: a universal source detector",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "199--210",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254782",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of detecting the source of a
rumor (information diffusion) in a network based on
observations about which set of nodes possess the
rumor. In a recent work [10], this question was
introduced and studied. The authors proposed rumor
centrality as an estimator for detecting the source.
They establish it to be the maximum likelihood
estimator with respect to the popular Susceptible
Infected (SI) model with exponential spreading time for
regular trees. They showed that as the size of infected
graph increases, for a line ($2$-regular tree) graph,
the probability of source detection goes to $0$ while
for $d$-regular trees with $ d \geq 3$ the probability
of detection, say $ \alpha_d $, remains bounded away
from $0$ and is less than $ 1 / 2$. Their results,
however stop short of providing insights for the
heterogeneous setting such as irregular trees or the SI
model with non-exponential spreading times. This paper
overcomes this limitation and establishes the
effectiveness of rumor centrality for source detection
for generic random trees and the SI model with a
generic spreading time distribution. The key result is
an interesting connection between a multi-type
continuous time branching process (an equivalent
representation of a generalized P{\'o}lya's urn, cf. [1])
and the effectiveness of rumor centrality. Through
this, it is possible to quantify the detection
probability precisely. As a consequence, we recover all
the results of [10] as a special case and more
importantly, we obtain a variety of results
establishing the universality of rumor centrality in
the context of tree-like graphs and the SI model with a
generic spreading time distribution.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Netrapalli:2012:LGE,
author = "Praneeth Netrapalli and Sujay Sanghavi",
title = "Learning the graph of epidemic cascades",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "211--222",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254783",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of finding the graph on which
an epidemic spreads, given only the times when each
node gets infected. While this is a problem of central
importance in several contexts --- offline and online
social networks, e-commerce, epidemiology --- there has
been very little work, analytical or empirical, on
finding the graph. Clearly, it is impossible to do so
from just one epidemic; our interest is in learning the
graph from a small number of independent epidemics. For
the classic and popular ``independent cascade''
epidemics, we analytically establish sufficient
conditions on the number of epidemics for both the
global maximum-likelihood (ML) estimator, and a natural
greedy algorithm to succeed with high probability. Both
results are based on a key observation: the global
graph learning problem decouples into $n$ local
problems one for each node. For a node of degree $d$,
we show that its neighborhood can be reliably found
once it has been infected $ O(d^2 \log n)$ times (for
ML on general graphs) or $ O(d \log n)$ times (for
greedy on trees). We also provide a corresponding
information-theoretic lower bound of $ \Omega (d \log
n)$; thus our bounds are essentially tight.
Furthermore, if we are given side-information in the
form of a super-graph of the actual graph (as is often
the case), then the number of epidemic samples required
--- in all cases --- becomes independent of the network
size $n$.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Milling:2012:NFR,
author = "Chris Milling and Constantine Caramanis and Shie
Mannor and Sanjay Shakkottai",
title = "Network forensics: random infection vs spreading
epidemic",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "223--234",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254784",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Computer (and human) networks have long had to contend
with spreading viruses. Effectively controlling or
curbing an outbreak requires understanding the dynamics
of the spread. A virus that spreads by taking advantage
of physical links or user-acquaintance links on a
social network can grow explosively if it spreads
beyond a critical radius. On the other hand, random
infections (that do not take advantage of network
structure) have very different propagation
characteristics. If too many machines (or humans) are
infected, network structure becomes essentially
irrelevant, and the different spreading modes appear
identical. When can we distinguish between mechanics of
infection? Further, how can this be done efficiently?
This paper studies these two questions. We provide
sufficient conditions for different graph topologies,
for when it is possible to distinguish between a random
model of infection and a spreading epidemic model, with
probability of misclassification going to zero. We
further provide efficient algorithms that are
guaranteed to work in different regimes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Kim:2012:WGB,
author = "Hyojun Kim and Moonkyung Ryu and Umakishore
Ramachandran",
title = "What is a good buffer cache replacement scheme for
mobile flash storage?",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "235--246",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254786",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Smartphones are becoming ubiquitous and powerful. The
Achilles' heel in such devices that limits performance
is the storage. Low-end flash memory is the storage
technology of choice in such devices due to energy,
size, and cost considerations. In this paper, we take a
critical look at the performance of flash on
smartphones for mobile applications. Specifically, we
ask the question whether the state-of-the-art buffer
cache replacement schemes proposed thus far (both
flash-agnostic and flash-aware ones) are the right ones
for mobile flash storage. To answer this question, we
first expose the limitations of current buffer cache
performance evaluation methods, and propose a novel
evaluation framework that is a hybrid between
trace-driven simulation and real implementation of such
schemes inside an operating system. Such an evaluation
reveals some unexpected and surprising insights on the
performance of buffer management schemes that
contradicts conventional wisdom. Armed with this
knowledge, we propose a new buffer cache replacement
scheme called SpatialClock. Using our evaluation
framework, we show the superior performance of
SpatialClock relative to the state-of-the-art for
mobile flash storage.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Alizadeh:2012:VRL,
author = "Mohammad Alizadeh and Adel Javanmard and Shang-Tse
Chuang and Sundar Iyer and Yi Lu",
title = "Versatile refresh: low complexity refresh scheduling
for high-throughput multi-banked {eDRAM}",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "247--258",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254787",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multi-banked embedded DRAM (eDRAM) has become
increasingly popular in high-performance systems.
However, the data retention problem of eDRAM is
exacerbated by the larger number of banks and the
high-performance environment in which it is deployed:
The data retention time of each memory cell decreases
while the number of cells to be refreshed increases.
For this, multi-bank designs offer a concurrent refresh
mode, where idle banks can be refreshed concurrently
during read and write operations. However, conventional
techniques such as periodically scheduling
refreshes---with priority given to refreshes in case of
conflicts with reads or writes---have variable
performance, increase read latency, and can perform
poorly in worst case memory access patterns. We propose
a novel refresh scheduling algorithm that is
low-complexity, produces near-optimal throughput with
universal guarantees, and is tolerant to bursty memory
access patterns. The central idea is to decouple the
scheduler into two simple-to-implement modules: one
determines which cell to refresh next and the other
determines when to force an idle cycle in all banks. We
derive necessary and sufficient conditions to guarantee
data integrity for all access patterns, with any given
number of banks, rows per bank, read/write ports and
data retention time. Our analysis shows that there is a
tradeoff between refresh overhead and burst tolerance
and characterizes this tradeoff precisely. The
algorithm is shown to be near-optimal and achieves, for
instance, 76.6\% reduction in worst-case refresh
overhead from the periodic refresh algorithm for a
250 MHz eDRAM with 10 $\mu$s retention time and 16 banks each
with 128 rows. Simulations with Apex-Map synthetic
benchmarks and switch lookup table traffic show that VR
can almost completely hide the refresh overhead for
memory accesses with moderate-to-high multiplexing
across memory banks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bhattacharya:2012:DLI,
author = "Suparna Bhattacharya and Karthick Rajamani and K.
Gopinath and Manish Gupta",
title = "Does lean imply green?: a study of the power
performance implications of {Java} runtime bloat",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "259--270",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254789",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The presence of software bloat in large flexible
software systems can hurt energy efficiency. However,
identifying and mitigating bloat is fairly effort
intensive. To enable such efforts to be directed where
there is a substantial potential for energy savings, we
investigate the impact of bloat on power consumption
under different situations. We conduct the first
systematic experimental study of the joint
power-performance implications of bloat across a range
of hardware and software configurations on modern
server platforms. The study employs controlled
experiments to expose different effects of a common
type of Java runtime bloat, excess temporary objects,
in the context of the SPECPower\_ssj2008 workload. We
introduce the notion of equi-performance power
reduction to characterize the impact, in addition to
peak power comparisons. The results show a wide
variation in energy savings from bloat reduction across
these configurations. Energy efficiency benefits at
peak performance tend to be most pronounced when bloat
affects a performance bottleneck and non-bloated
resources have low energy-proportionality.
Equi-performance power savings are highest when bloated
resources have a high degree of energy proportionality.
We develop an analytical model that establishes a
general relation between resource pressure caused by
bloat and its energy efficiency impact under different
conditions of resource bottlenecks and energy
proportionality. Applying the model to different
``what-if'' scenarios, we predict the impact of bloat
reduction and corroborate these predictions with
empirical observations. Our work shows that the
prevalent software-only view of bloat is inadequate for
assessing its power-performance impact and instead
provides a full systems approach for reasoning about
its implications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lim:2012:DFQ,
author = "Seung-Hwan Lim and Jae-Seok Huh and Youngjae Kim and
Galen M. Shipman and Chita R. Das",
title = "{D}-factor: a quantitative model of application
slow-down in multi-resource shared systems",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "271--282",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254790",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scheduling multiple jobs onto a platform enhances
system utilization by sharing resources. The benefits
from higher resource utilization include reduced cost
to construct, operate, and maintain a system, which
often include energy consumption. Maximizing these
benefits, while satisfying performance limits, comes at
a price --- resource contention among jobs increases
job completion time. In this paper, we analyze
slow-downs of jobs due to contention for multiple
resources in a system; referred to as dilation factor.
We observe that multiple-resource contention creates
non-linear dilation factors of jobs. From this
observation, we establish a general quantitative model
for dilation factors of jobs in multi-resource systems.
A job is characterized by a vector-valued loading
statistics and dilation factors of a job set are given
by a quadratic function of their loading vectors. We
demonstrate how to systematically characterize a job,
maintain the data structure to calculate the dilation
factor (loading matrix), and calculate the dilation
factor of each job. We validated the accuracy of the
model with multiple processes running on a native Linux
server, virtualized servers, and with multiple
MapReduce workloads co-scheduled in a cluster.
Evaluation with measured data shows that the D-factor
model has an error margin of less than 16\%. We also
show that the model can be integrated with an existing
on-line scheduler to minimize the makespan of
workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yoo:2012:AAD,
author = "Wucherl Yoo and Kevin Larson and Lee Baugh and
Sangkyum Kim and Roy H. Campbell",
title = "{ADP}: automated diagnosis of performance pathologies
using hardware events",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "283--294",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254791",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Performance characterization of applications' hardware
behavior is essential for making the best use of
available hardware resources. Modern architectures
offer access to many hardware events that are capable
of providing information to reveal architectural
performance bottlenecks throughout the core and memory
hierarchy. These events can provide programmers with
unique and powerful insights into the causes of the
resource bottlenecks in their applications. However,
interpreting these events has been a significant
challenge. We present an automated system that uses
machine learning to identify an application's
performance problems. Our system provides programmers
with insights about the performance of their
applications while shielding them from the onerous task
of digesting hardware events. It uses a decision tree
algorithm, random forests on our micro-benchmarks to
fingerprint the performance problems. Our system
divides a profiled application into functions and
automatically classifies each function by the dominant
hardware resource bottlenecks. Using the
classifications from the hotspot functions, we were
able to achieve an average speedup of 1.73 from three
applications in the PARSEC benchmark suite. Our system
provides programmers with a guideline of where, what,
and how to fix the detected performance problems in
applications, which would have otherwise required
considerable architectural knowledge.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Xu:2012:PFS,
author = "Di Xu and Chenggang Wu and Pen-Chung Yew and Jianjun
Li and Zhenjiang Wang",
title = "Providing fairness on shared-memory multiprocessors
via process scheduling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "295--306",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254792",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Competition for shared memory resources on
multiprocessors is the most dominant cause for slowing
down applications and makes their performance varies
unpredictably. It exacerbates the need for Quality of
Service (QoS) on such systems. In this paper, we
propose a fair-progress process scheduling (FPS) policy
to improve system fairness. Its strategy is to force
the equally-weighted applications to have the same
amount of slowdown when they run concurrently. The
basic approach is to monitor the progress of all
applications at runtime. When we find an application
suffered more slowdown and accumulated less effective
work than others, we allocate more CPU time to give it
a better parity. Our policy also allows different
weights to different threads, and provides an effective
and robust tuner that allows the OS to freely make
tradeoffs between system fairness and higher
throughput. Evaluation results show that FPS can
significantly improve system fairness by an average of
53.5\% and 65.0\% on a 4-core processor with a private
cache and a 4-core processor with a shared cache,
respectively. The penalty is about 1.1\% and 1.6\% of
the system throughput. For memory-intensive workloads,
FPS also improves system fairness by an average of
45.2\% and 21.1\% on 4-core and 8-core system
respectively at the expense of a throughput loss of
about 2\%.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Figueiredo:2012:CCT,
author = "Daniel Figueiredo and Philippe Nain and Bruno Ribeiro
and Edmundo {de Souza e Silva} and Don Towsley",
title = "Characterizing continuous time random walks on time
varying graphs",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "307--318",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254794",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper we study the behavior of a continuous
time random walk (CTRW) on a stationary and ergodic
time varying dynamic graph. We establish conditions
under which the CTRW is a stationary and ergodic
process. In general, the stationary distribution of the
walker depends on the walker rate and is difficult to
characterize. However, we characterize the stationary
distribution in the following cases: (i) the walker
rate is significantly larger or smaller than the rate
in which the graph changes (time-scale separation),
(ii) the walker rate is proportional to the degree of
the node that it resides on (coupled dynamics), and
(iii) the degrees of node belonging to the same
connected component are identical (structural
constraints). We provide examples that illustrate our
theoretical findings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:BRW,
author = "Chul-Ho Lee and Xin Xu and Do Young Eun",
title = "Beyond random walk and {Metropolis--Hastings}
samplers: why you should not backtrack for unbiased
graph sampling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "319--330",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254795",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Graph sampling via crawling has been actively
considered as a generic and important tool for
collecting uniform node samples so as to consistently
estimate and uncover various characteristics of complex
networks. The so-called simple random walk with
re-weighting (SRW-rw) and Metropolis--Hastings (MH)
algorithm have been popular in the literature for such
unbiased graph sampling. However, an unavoidable
downside of their core random walks --- slow diffusion
over the space, can cause poor estimation accuracy. In
this paper, we propose non-backtracking random walk
with re-weighting (NBRW-rw) and MH algorithm with
delayed acceptance (MHDA) which are theoretically
guaranteed to achieve, at almost no additional cost,
not only unbiased graph sampling but also higher
efficiency (smaller asymptotic variance of the
resulting unbiased estimators) than the SRW-rw and the
MH algorithm, respectively. In particular, a remarkable
feature of the MHDA is its applicability for any
non-uniform node sampling like the MH algorithm, but
ensuring better sampling efficiency than the MH
algorithm. We also provide simulation results to
confirm our theoretical findings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Song:2012:CEM,
author = "Han Hee Song and Berkant Savas and Tae Won Cho and
Vacha Dave and Zhengdong Lu and Inderjit S. Dhillon and
Yin Zhang and Lili Qiu",
title = "Clustered embedding of massive social networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "331--342",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254796",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The explosive growth of social networks has created
numerous exciting research opportunities. A central
concept in the analysis of social networks is a
proximity measure, which captures the closeness or
similarity between nodes in the network. Despite much
research on proximity measures, there is a lack of
techniques to efficiently and accurately compute
proximity measures for large-scale social networks. In
this paper, we embed the original massive social graph
into a much smaller graph, using a novel dimensionality
reduction technique termed Clustered Spectral Graph
Embedding. We show that the embedded graph captures the
essential clustering and spectral structure of the
original graph and allow a wide range of analysis to be
performed on massive social graphs. Applying the
clustered embedding to proximity measurement of social
networks, we develop accurate, scalable, and flexible
solutions to three important social network analysis
tasks: proximity estimation, missing link inference,
and link prediction. We demonstrate the effectiveness
of our solutions to the tasks in the context of large
real-world social network datasets: Flickr,
LiveJournal, and MySpace with up to 2 million nodes and
90 million links.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cohen:2012:DLN,
author = "Edith Cohen and Graham Cormode and Nick Duffield",
title = "Don't let the negatives bring you down: sampling from
streams of signed updates",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "343--354",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254798",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Random sampling has been proven time and time again to
be a powerful tool for working with large data. Queries
over the full dataset are replaced by approximate
queries over the smaller (and hence easier to store and
manipulate) sample. The sample constitutes a flexible
summary that supports a wide class of queries. But in
many applications, datasets are modified with time, and
it is desirable to update samples without requiring
access to the full underlying datasets. In this paper,
we introduce and analyze novel techniques for sampling
over dynamic data, modeled as a stream of modifications
to weights associated with each key. While sampling
schemes designed for stream applications can often
readily accommodate positive updates to the dataset,
much less is known for the case of negative updates,
where weights are reduced or items deleted altogether.
We primarily consider the turnstile model of streams,
and extend classic schemes to incorporate negative
updates. Perhaps surprisingly, the modifications to
handle negative updates turn out to be natural and
seamless extensions of the well-known positive
update-only algorithms. We show that they produce
unbiased estimators, and we relate their performance to
the behavior of corresponding algorithms on insert-only
streams with different parameters. A careful analysis
is necessitated, in order to account for the fact that
sampling choices for one key now depend on the choices
made for other keys. In practice, our solutions turn
out to be efficient and accurate. Compared to recent
algorithms for $L_p$ sampling which can be applied to
this problem, they are significantly more reliable, and
dramatically faster.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ammar:2012:ERA,
author = "Ammar Ammar and Devavrat Shah",
title = "Efficient rank aggregation using partial data",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "355--366",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254799",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The need to rank items based on user input arises in
many practical applications such as elections, group
decision making and recommendation systems. The primary
challenge in such scenarios is to decide on a global
ranking based on partial preferences provided by users.
The standard approach to address this challenge is to
ask users to provide explicit numerical ratings
(cardinal information) of a subset of the items. The
main appeal of such an approach is the ease of
aggregation. However, the rating scale as well as the
individual ratings are often arbitrary and may not be
consistent from one user to another. A more natural
alternative to numerical ratings requires users to
compare pairs of items (ordinal information). On the
one hand, such comparisons provide an ``absolute''
indicator of the user's preference. On the other hand,
it is often hard to combine or aggregate these
comparisons to obtain a consistent global ranking. In
this work, we provide a tractable framework for
utilizing comparison data as well as first-order
marginal information (see Section 2) for the purpose of
ranking. We treat the available information as partial
samples from an unknown distribution over permutations.
We then reduce ranking problems of interest to
performing inference on this distribution.
Specifically, we consider the problems of (a) finding
an aggregate ranking of $n$ items, (b) learning the
mode of the distribution, and (c) identifying the top
$k$ items. For many of these problems, we provide
efficient algorithms to infer the ranking directly from
the data without the need to estimate the underlying
distribution. In other cases, we use the Principle of
Maximum Entropy to devise a concise parameterization of
a distribution consistent with observations using only
O(n$^2$) parameters, where $n$ is the number of items
in question. We propose a distributed, iterative
algorithm for estimating the parameters of the
distribution. We establish the correctness of the
algorithm and identify its rate of convergence
explicitly.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Duffield:2012:FSA,
author = "Nick Duffield",
title = "Fair sampling across network flow measurements",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "367--378",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254800",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Sampling is crucial for controlling resource
consumption by internet traffic flow measurements.
Routers use Packet Sampled NetFlow, and completed flow
records are sampled in the measurement infrastructure.
Recent research, motivated by the need of service
providers to accurately measure both small and large
traffic subpopulations, has focused on distributing a
packet sampling budget amongst subpopulations. But long
timescales of hardware development and lower bandwidth
costs motivate post-measurement analysis of complete
flow records at collectors instead. Sampling in
collector databases then manages data volumes, yielding
general purpose summaries that are rapidly queried to
trigger drill-down analysis on a time limited window of
full data. These are sufficiently small to be archived.
This paper addresses the problem of distributing a
sampling budget over subpopulations of flow records.
Estimation accuracy goals are met by fairly sharing the
budget. We establish a correspondence between the type
of accuracy goal, and the flavor of fair sharing used.
A streaming Max-Min Fair Sampling algorithm fairly
shares the sampling budget across subpopulations, with
sampling as a mechanism to deallocate budget. This
provides timely samples and is robust against
uncertainties in configuration and demand. We
illustrate using flow records from an access router of
a large ISP, where rates over interface traffic
subpopulations vary over several orders of magnitude.
We detail an implementation whose computational cost is
no worse than subpopulation-oblivious sampling.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Peng:2012:TBN,
author = "Kunyang Peng and Qunfeng Dong",
title = "{TCAM}-based {NFA} implementation",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "379--380",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254802",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Regular expression matching as the core packet
inspection engine of network systems has long been
striving to be both fast in matching speed (like DFA)
and scalable in storage space (like NFA). Recently,
ternary content addressable memory (TCAM) has been
investigated as a promising way out, by implementing
DFA using TCAM for regular expression matching. In this
paper, we present the first method for implementing NFA
using TCAM. Through proper TCAM encoding, our method
matches each input byte with one single TCAM lookup ---
operating at precisely the same speed as DFA, while
using a number of TCAM entries that can be close to NFA
size. These properties make our method an important
step along a new path --- TCAM-based NFA implementation
--- towards the long-standing goal of fast and scalable
regular expression matching.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Anshelevich:2012:SEP,
author = "Elliot Anshelevich and Ameya Hate and Koushik Kar and
Michael Usher",
title = "Stable and efficient pricing for inter-domain traffic
forwarding",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "381--382",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254803",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We address the question of strategic pricing of
inter-domain traffic forwarding services provided by
ISPs, which is also closely coupled with the question
of how ISPs route their traffic towards their
neighboring ISPs. Posing this question as a
non-cooperative game between neighboring ISPs, we study
the properties of this pricing game in terms of the
existence and efficiency of the equilibrium. We observe
that for ``well-provisioned'' ISPs, Nash equilibrium
prices exist and they result in flows that maximize the
overall network utility (generalized end-to-end
throughput). For general ISP topologies, equilibrium
prices may not exist; however, simulations on a large
number of realistic topologies show that best-response
based simple price update solutions converge to stable
and efficient prices and flows for most topologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{DiCioccio:2012:MCH,
author = "Lucas DiCioccio and Renata Teixeira and Catherine
Rosenberg",
title = "Measuring and characterizing home networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "383--384",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254804",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper presents the design and evaluation of
HomeNet Profiler, a tool that runs on an end-system in
the home to collect data from home networks. HomeNet
Profiler collects a wide range of measurements
including: the set of devices, the set of services
(with UPnP and Zeroconf), and the characteristics of
the WiFi environment. Since the release of HomeNet
Profiler in April 2011, we have collected data from
over 2,400 distinct homes in 46 different countries.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sommers:2012:CMA,
author = "Joel Sommers and Paul Barford",
title = "Comparing metro-area cellular and {WiFi} performance:
extended abstract",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "385--386",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254805",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cellular and 802.11 WiFi offer two compelling
connectivity options for mobile users. The goal of our
work is to better understand performance
characteristics of these technologies in diverse
environments and conditions. To that end, we compare
and contrast cellular and WiFi performance using
crowd-sourced data from speedtest.net. We consider
spatio-temporal performance aspects (e.g., upload and
download throughput and latency) using over 3 million
user-initiated tests initiated in 15 different metro
areas, collected over 15 weeks. In these preliminary
results, we find that WiFi performance generally
exceeds cellular performance, and that observed
characteristics are highly variable across different
locations and times of day. We also observe diverse
performance characteristics resulting from the rollout
of new cell access technologies and service differences
among local providers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Nemeth:2012:TSC,
author = "G{\'a}bor N{\'e}meth and G{\'a}bor R{\'e}tv{\'a}ri",
title = "Towards a statistical characterization of the
competitiveness of oblivious routing",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "387--388",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254806",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Oblivious routing asks for a static routing that
serves arbitrary user demands with minimal performance
penalty. Performance is measured in terms of the
competitive ratio, the proportion of the maximum
congestion to the best possible congestion. In this
paper, we take the first steps towards extending this
worst-case characterization to a more revealing
statistical one. We define new performance metrics and
we present numerical evaluations showing that, in
statistical terms, oblivious routing is not as
competitive as the worst-case performance
characterizations would suggest.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zarifzadeh:2012:RT,
author = "Sajjad Zarifzadeh and {Madhwaraj G. K.} and Constantine
Dovrolis",
title = "Range tomography",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "389--390",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254807",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:SAM,
author = "Myungjin Lee and Nick Duffield and Ramana Rao
Kompella",
title = "A scalable architecture for maintaining packet latency
measurements",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "391--392",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254808",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Latency has become an important metric for network
monitoring since the emergence of new latency-sensitive
applications (e.g., algorithmic trading and
high-performance computing). In this paper, to provide
latency measurements at both finer (e.g., packet) as
well as flexible (e.g., flow subsets) levels of
granularity, we propose an architecture called MAPLE
that essentially stores packet-level latencies in
routers and allows network operators to query the
latency of arbitrary traffic sub-populations. MAPLE is
built using a scalable data structure called SVBF with
small storage needs.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Laner:2012:MRN,
author = "Markus Laner and Philipp Svoboda and Markus Rupp",
title = "Modeling randomness in network traffic",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "393--394",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254809",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A continuous challenge in the field of network traffic
modeling is to map recorded traffic onto parameters of
random processes, in order to enable simulations of the
respective traffic. A key element thereof is a
convenient model which is simple, yet, captures the
most relevant statistics. This work aims to find such a
model which, more precisely, enables the generation of
multiple random processes with arbitrary but jointly
characterized distributions, auto-correlation functions
and cross-correlations. Hence, we present the
definition of a novel class of models, the derivation
of a respective closed-form analytical representation
and its application on real network traffic. Our
modeling approach comprises: (i) generating statistical
dependent Gaussian random processes, (ii) introducing
auto-correlation to each process with a linear filter
and, (iii) transforming them sample-wise by real-valued
polynomial functions in order to shape their
distributions. This particular structure allows to
split the parameter fitting problem into three
independent parts, each of which solvable by standard
methods. Therefore, it is simple and straightforward to
fit the model to measurement data.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gallo:2012:PER,
author = "Massimo Gallo and Bruno Kauffmann and Luca Muscariello
and Alain Simonian and Christian Tanguy",
title = "Performance evaluation of the random replacement
policy for networks of caches",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "395--396",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254810",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Caching is a key component for Content Distribution
Networks and new Information-Centric Network
architectures. In this paper, we address performance
issues of caching networks running the RND replacement
policy. We first prove that when the popularity
distribution follows a general power-law with decay
exponent $ \alpha > 1 $, the miss probability is
asymptotic to $ O(C^{1 - \alpha }) $ for large cache
size $C$. We further evaluate network of caches under
RND policy for homogeneous tree networks and extend the
analysis to tandem cache networks where caches employ
either LRU or RND policies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mukherjee:2012:SCT,
author = "Koyel Mukherjee and Samir Khuller and Amol Deshpande",
title = "Saving on cooling: the thermal scheduling problem",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "397--398",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254811",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bodas:2012:CCM,
author = "Shreeshankar Bodas and Devavrat Shah and Damon
Wischik",
title = "Congestion control meets medium access: throughput,
delay, and complexity",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "399--400",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254812",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper looks at the problem of designing medium
access algorithm for wireless networks with the
objective of providing high throughput and low delay
performance to the users, while requiring only a modest
computational effort at the transmitters and receivers.
Additive inter-user interference at the receivers is an
important physical layer characteristic of wireless
networks. Today's Wi-Fi networks are based upon the
abstraction of physical layer where inter-user
interference is considered as noise leading to the
'collision' model in which users are required to
co-ordinate their transmissions through Carrier Sensing
Multiple Access (CSMA)-based schemes to avoid
interference. This, in turn, leads to an inherent
performance trade-off [1]: it is impossible to obtain
high throughput and low delay by means of low
complexity medium access algorithm (unless P=NP). As
the main result, we establish that this trade-off is
primarily due to treating interference as noise in the
current wireless architecture. Concretely, we develop a
simple medium access algorithm that allows for
simultaneous transmissions of users to the same
receiver by performing joint decoding at receivers,
over time. For a receiver to be able to decode multiple
transmissions quickly enough, we develop appropriate
congestion control where each transmitter maintains a
``window'' of undecoded transmitted data that is
adjusted based upon the ``feedback'' from the receiver.
In summary, this provides an efficient, low complexity
``online'' code operating at varying rate, and the
system as a whole experiences only small amount of
delay (including decoding time) while operating at high
throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tantawi:2012:OCP,
author = "Asser N. Tantawi",
title = "Optimized cloud placement of virtual clusters using
biased importance sampling",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "401--402",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254813",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We introduce an algorithm for the placement of
constrained, networked virtual clusters in the cloud,
that is based on importance sampling (also known as
cross-entropy). Rather than using a straightforward
implementation of such a technique, which proved
inefficient, we considerably enhance the method by
biasing the sampling process to incorporate
communication needs and other constraints of placement
requests to yield an efficient algorithm that is linear
in the size of the cloud. We investigate the quality of
the results of using our algorithm on a simulated
cloud.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shen:2012:PEC,
author = "Kai Shen and Arrvindh Shriraman and Sandhya Dwarkadas
and Xiao Zhang",
title = "Power and energy containers for multicore servers",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "403--404",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254814",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Power capping and energy efficiency are critical
concerns in server systems, particularly when serving
dynamic workloads on resource-sharing multicores. We
present a new operating system facility (power and
energy containers) that accounts for and controls the
power/energy usage of individual fine-grained server
requests. This facility is enabled by novel techniques
for multicore power attribution to concurrent tasks,
measurement/modeling alignment to enhance
predictability, and request power accounting and
control.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2012:CIW,
author = "Kai Wang and Minghong Lin and Florin Ciucu and Adam
Wierman and Chuang Lin",
title = "Characterizing the impact of the workload on the value
of dynamic resizing in data centers",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "405--406",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254815",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy consumption imposes a significant cost for data
centers; yet much of that energy is used to maintain
excess service capacity during periods of predictably
low load. Resultantly, there has recently been interest
in developing designs that allow the service capacity
to be dynamically resized to match the current
workload. However, there is still much debate about the
value of such approaches in real settings. In this
paper, we show that the value of dynamic resizing is
highly dependent on statistics of the workload process.
In particular, both slow time-scale non-stationarities
of the workload (e.g., the peak-to-mean ratio) and the
fast time-scale stochasticity (e.g., the burstiness of
arrivals) play key roles. To illustrate the impact of
these factors, we combine optimization-based modeling
of the slow time-scale with stochastic modeling of the
fast time scale. Within this framework, we provide both
analytic and numerical results characterizing when
dynamic resizing does (and does not) provide
benefits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2012:PLSa,
author = "Yue Tan and Yingdong Lu and Cathy H. Xia",
title = "Provisioning for large scale cloud computing
services",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "407--408",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254816",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Resource provisioning, the task of planning sufficient
amounts of resources to meet service level agreements,
has become an important management task in emerging
cloud computing services. In this paper, we present a
stochastic modeling approach to guide the resource
provisioning task for future service clouds as the
demand grows large. We focus on on-demand services and
consider service availability as the key quality of
service constraint. A specific scenario under
consideration is when resources can be measured in base
instances. We develop an asymptotic provisioning
methodology that utilizes tight performance bounds for
the Erlang loss system to determine the minimum
capacity levels that meet the service availability
requirements. We show that our provisioning solutions
are not only asymptotically exact but also provide
better QoS guarantees at all load conditions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Narayana:2012:DWA,
author = "Srinivas Narayana and Joe Wenjie Jiang and Jennifer
Rexford and Mung Chiang",
title = "Distributed wide-area traffic management for cloud
services",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "409--410",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254817",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of interactive cloud services depends
heavily on which data centers handle client requests,
and which wide-area paths carry traffic. While making
these decisions, cloud service providers also need to
weigh operational considerations like electricity and
bandwidth costs, and balancing server loads across
replicas. We argue that selecting data centers and
network routes independently, as is common in today's
services, can lead to much lower performance or higher
costs than a coordinated decision. However,
fine-grained joint control of two large distributed
systems---e.g., DNS-based replica-mapping and data
center multi-homed routing---can be administratively
challenging. In this paper, we introduce the design of
a system that jointly optimizes replica-mapping and
multi-homed routing, while retaining the functional
separation that exists between them today. We show how
to construct a provably optimal distributed solution
implemented through local computations and message
exchanges between the mapping and routing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Dixit:2012:EFG,
author = "Advait Abhay Dixit and Pawan Prakash and Ramana Rao
Kompella and Charlie Hu",
title = "On the efficacy of fine-grained traffic splitting
protocols in data center networks",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "411--412",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254818",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Current multipath routing techniques split traffic at
a per-flow level because, according to conventional
wisdom, forwarding packets of a TCP flow along
different paths leads to packet reordering which is
detrimental to TCP. In this paper, we revisit this
``myth'' in the context of cloud data center networks
which have regular topologies such as multi-rooted
trees. We argue that due to the symmetry in the
multiple equal-cost paths in such networks, simply
spraying packets of a given flow among all equal-cost
paths, leads to balanced queues across multiple paths,
and consequently little packet reordering. Using a
                 testbed comprising NetFPGA switches, we show how
cloud applications benefit from better network
utilization in data centers.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Frank:2012:CAT,
author = "Benjamin Frank and Ingmar Poese and Georgios
Smaragdakis and Steve Uhlig and Anja Feldmann",
title = "Content-aware traffic engineering",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "413--414",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254819",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Recent studies show that a large fraction of Internet
traffic is originated by Content Providers (CPs) such
as content distribution networks and hyper-giants. To
cope with the increasing demand for content, CPs deploy
massively distributed server infrastructures. Thus,
content is available in many network locations and can
be downloaded by traversing different paths in a
network. Despite the prominent server location and path
diversity, the decisions on how to map users to servers
by CPs and how to perform traffic engineering by ISPs,
are independent. This leads to a lose-lose situation as
                 CPs are not aware of the network bottlenecks nor the
location of end-users, and the ISPs struggle to cope
with rapid traffic shifts caused by the dynamic CP
server selection process. In this paper we propose and
evaluate Content-aware Traffic Engineering (CaTE),
which dynamically adapts the traffic demand for content
hosted on CPs by utilizing ISP network information and
end-user location during the server selection process.
This leads to a win-win situation because CPs are able
to enhance their end-user to server mapping and ISPs
gain the ability to partially influence the traffic
demands in their networks. Indeed, our results using
traces from a Tier-1 ISP show that a number of network
metrics can be improved when utilizing CaTE.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hu:2012:UPA,
author = "Jian Hu and Hong Jiang and Prakash Manden",
title = "Understanding performance anomalies of {SSDs} and
their impact in enterprise application environment",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "415--416",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254820",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "SSD is known to have the erase-before-write and
out-of-place update properties. When the number of
invalidated pages is more than a given threshold, a
process referred to as garbage collection (GC) is
triggered to erase blocks after valid pages in these
blocks are copied somewhere else. GC degrades both the
performance and lifetime of SSD significantly because
of the read-write-erase operation sequence. In this
paper, we conduct intensive experiments on a 120GB
Intel 320 SATA SSD and a 320GB Fusion IO ioDrive PCI-E
SSD to show and analyze the following important
performance issues and anomalies. The commonly accepted
knowledge that the performance drops sharply as more
data is being written is not always true. This is
because GC efficiency, a more important factor
affecting SSD performance, has not been carefully
considered. It is defined as the percentage of invalid
pages of a GC erased block. It is possible to avoid the
performance degradation by managing the addressable LBA
range. Estimating the residual lifetime of an SSD is a
very challenging problem because it involves several
interdependent and mutually interacting factors such as
FTL, GC, wear leveling, workload characteristics, etc.
We develop an analytical model to estimate the residual
lifetime of a given SSD. The high random-read
performance is widely accepted as one of the advantages
of SSD. We will show that this is not true if the GC
efficiency is low.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Glatz:2012:CIO,
author = "Eduard Glatz and Xenofontas Dimitropoulos",
title = "Classifying {Internet} one-way traffic",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "417--418",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254821",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we analyze a massive data-set that
captures 5.23 petabytes of traffic to shed light into
the composition of one-way traffic towards a large
network based on a novel one-way traffic classifier. We
find that one-way traffic makes a very large fraction
of all traffic in terms of flows, it can be primarily
attributed to malicious causes, and it has declined
since 2004 because of relative decrease of scan
traffic. In addition, we show how our classifier is
useful for detecting network outages.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Arora:2012:FCE,
author = "Manish Arora and Feng Wang and Bob Rychlik and Dean
Tullsen",
title = "Fast cost efficient designs by building upon the
{Plackett} and {Burman} method",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "419--420",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254822",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "CPU processor design involves a large set of
increasingly complex design decisions, and simulating
all possible designs is typically not feasible.
Sensitivity analysis, a commonly used technique, can be
dependent on the starting point of the design and does
not necessarily account for the cost of each parameter.
                 This work proposes a method to simultaneously analyze
multiple parameters with a small number of experiments
by leveraging the Plackett and Burman (P\&B) analysis
method. It builds upon the technique in two specific
ways. It allows a parameter to take multiple values and
replaces the unit-less impact factor with
cost-proportional values.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Keller:2012:MHN,
author = "Matthias Keller and Jan Beutel and Lothar Thiele",
title = "Multi-hop network tomography: path reconstruction and
per-hop arrival time estimation from partial
information",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "421--422",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254823",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In the context of low-power wireless sensor networks,
this paper presents multi-hop network tomography (MNT),
a novel, non-intrusive algorithm for reconstructing the
path, the per-hop arrival order, and the per-hop
arrival time of individual packets at runtime. While
explicitly transmitting this information over the radio
would negatively impact the performance of the system
under investigation, information is instead
reconstructed after packets have been received at the
sink.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papapanagiotou:2012:SVL,
author = "Ioannis Papapanagiotou and Erich M. Nahum and
Vasileios Pappas",
title = "Smartphones vs. laptops: comparing {Web} browsing
behavior and the implications for caching",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "423--424",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254824",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we present the differences and
similarities of the web browsing behavior in most
common mobile platforms. We devise a novel Operating
System (OS) fingerprinting methodology to distinguish
different types of wireless devices (smartphone vs
laptops) as well as operating system instances (iOS,
Android, BlackBerry etc.). We showcase that most of the
multimedia content in smartphone devices is delivered
via Range-Requests, and a large portion of the video
transfers are aborted. We also show that laptop devices
have more intelligent browser caching capabilities. We
investigate the impact of an additional browser cache,
and demonstrate that a 10MB browser cache that is able
to handle partial downloads in smartphones would be
enough to handle the majority of the savings. Finally,
we showcase that caching policies need to be amended to
attain the maximum possible savings in proxy caches.
Based on those optimizations the emulated proxy cache
provides 10\%--20\% in bandwidth savings.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Reinecke:2012:MMV,
author = "Philipp Reinecke and Mikl{\'o}s Telek and Katinka
Wolter",
title = "Micro and macro views of discrete-state {Markov}
models and their application to efficient simulation
with phase-type distributions",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "425--426",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254826",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bertran:2012:PFB,
author = "Ramon Bertran and Marc Gonz{\`a}lez and Xavier
Martorell and Nacho Navarro and Eduard Ayguad{\'e}",
title = "{POTRA}: a framework for building power models for
next generation multicore architectures",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "427--428",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254827",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hayden:2012:BTS,
author = "Richard A. Hayden",
title = "Basic theory and some applications of martingales",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "429--430",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254828",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This tutorial surveys the fundamental results of the
theory of martingales from the perspective of the
performance engineer. We will present the fundamental
results and illustrate their power through simple and
elegant proofs of important and well-known results in
performance analysis. The remainder of the tutorial
will introduce the martingale functional central limit
theorem and semi-martingale decomposition methodology
for the characterization and proof of heavy-traffic
limit results for Markovian queueing systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{deSouzaeSilva:2012:AML,
author = "Edmundo {de Souza e Silva} and Daniel Sadoc Menasche",
title = "Applications of machine learning to performance
evaluation",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "431--432",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254829",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Aikat:2012:INE,
author = "Jay Aikat and Kevin Jeffay",
title = "Introduction to network experiments using the {GENI}
cyberinfrastructure",
journal = j-SIGMETRICS,
volume = "40",
number = "1",
pages = "433--434",
month = jun,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2318857.2254830",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:39 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this tutorial, we will introduce the
SIGMETRICS/Performance community to the vast testbeds,
tools and resources openly available through the GENI
(Global Environment for Network Innovations) project.
We will present details about the distributed computing
resources available on GENI for researchers interested
in simulation as well as measurement-based performance
evaluation experiments. We will demonstrate simple
experiments on GENI, and leave them with information on
how to run experiments for research and education using
GENI resources.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Eriksson:2012:PLA,
author = "Brian Eriksson and Paul Barford and Bruce Maggs and
Robert Nowak",
title = "Posit: a lightweight approach for {IP} geolocation",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "2--11",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381058",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Location-specific Internet services are predicated on
the ability to identify the geographic position of IP
hosts accurately. Fundamental to current
state-of-the-art geolocation techniques is reliance on
heavyweight traceroute-like probes that put a
significant traffic load on networks. In this paper, we
introduce a new lightweight approach to IP geolocation
that we call Posit. This methodology requires only a
small number of delay measurements conducted to end
host targets in conjunction with a
computationally-efficient statistical embedding
technique. We demonstrate that Posit performs better
than all existing geolocation tools across a wide
spectrum of measurement infrastructures with varying
geographic densities. Specifically, Posit is shown to
geolocate hosts with median error improvements of over
55\% with respect to all current measurement-based IP
geolocation methodologies.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coucheney:2012:CSE,
author = "Pierre Coucheney and Patrick Maill{\'e} and Bruno
Tuffin",
title = "Comparison of search engines non-neutral and neutral
behaviors",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "14--17",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381060",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network neutrality has recently attracted a lot of
attention but search neutrality is also becoming a
vivid subject of discussion because a non-neutral
search may prevent some relevant content from being
accessed by users. We propose in this paper to model
two situations of a non-neutral search engine behavior,
which can rank the link propositions according to the
profit a search can generate for it instead of just
relevance: the case when the search engine owns some
content, and the case when it imposes a tax on organic
links, a bit similarly to what it does for commercial
links. We analyze the particular (and deterministic)
situation of a single keyword, and describe the problem
for the whole potential set of keywords.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hanawal:2012:GTA,
author = "Manjesh K. Hanawal and Eitan Altman and Rajesh
Sundaresan",
title = "Game theoretic analysis of collusions in nonneutral
networks",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "18--21",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381061",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper studies the impact of exclusive contracts
between a content provider (CP) and an internet service
provider (ISP) in a nonneutral network. We consider a
simple linear demand function for the CPs. We study
when an exclusive contract is beneficial to the
colluding pair and evaluate its impact on the
noncolluding players at equilibrium. For the case of
two CPs and one ISP we show that collusion may not
always be beneficial. We derive an explicit condition
in terms of the advertisement revenues of the CPs that
tells when a collusion is profitable to the colluding
entities.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yu:2012:GUW,
author = "Seung Min Yu and Seong-Lyun Kim",
title = "Guaranteeing user welfare in network service:
comparison of two subsidy schemes",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "22--25",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381062",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the emergence of smart devices, mobile data
traffic grows exponentially. A Cisco report predicts
that global mobile data traffic will increase 26-fold
between 2010 and 2015. Therefore, the spectrum shortage
continues and the spectrum price increases, which will
eventually lead to decrease of user welfare. Another
side effect of the data traffic growth is the
polarization of data traffic among users. To resolve
these problems, we introduce two subsidy schemes (i.e.,
price and quality of service (QoS) subsidy schemes) and
mathematically analyze the effect of each scheme. We
identify that if the regulator has sufficient spectrum
amount for the network service, then the QoS subsidy
scheme will be a good choice for all players in the
network service market. Otherwise, the price subsidy
scheme can be better from user welfare perspective.
Even though our analytic results are derived under some
assumptions for mathematical tractability, it will
provide good intuitions for spectrum regulation.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Berry:2012:NMC,
author = "R. Berry and M. Honig and T. Nguyen and V. Subramanian
and H. Zhou and R. Vohra",
title = "Newsvendor model of capacity sharing",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "26--29",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381063",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ma:2012:PDK,
author = "Richard T. B. Ma and Dah Ming Chiu and John C. S. Lui
and Vishal Misra and Dan Rubenstein",
title = "Price differentiation in the {Kelly} mechanism",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "30--33",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381064",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Houidi:2012:PTB,
author = "Zied Ben Houidi and Helia Pouyllau",
title = "The price of tussles: bankrupt in cyberspace?",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "34--37",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381065",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lodhi:2012:PSA,
author = "Aemen Lodhi and Amogh Dhamdhere and Constantine
Dovrolis",
title = "Peering strategy adoption by transit providers in the
{Internet}: a game theoretic approach?",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "38--41",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381066",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mastroeni:2012:PIP,
author = "Loretta Mastroeni and Maurizio Naldi",
title = "Pricing of insurance policies against cloud storage
price rises",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "42--45",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381067",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "When a company migrates to cloud storage, the way back
is neither fast nor cheap. The company is then locked
up in the storage contract and exposed to upward market
prices, which reduce the company's profit and may even
bring it below zero. We propose a protection means
based on an insurance contract, by which the cloud
purchaser is indemnified when the current storage price
exceeds a pre-defined threshold. By applying the
financial options theory, we provide a formula for the
insurance price (the premium). By using historical data
on market prices for disks, we apply the formula in
realistic scenarios. We show that the premium grows
nearly quadratically with the duration of the coverage
period as long as this is below one year, but grows
more slowly, though faster than linearly, over longer
coverage periods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:IVI,
author = "Dongmyung Lee and Jeonghoon Mo and Jinwoo Park",
title = "{ISP} vs. {ISP $+$ CDN}: can {ISPs} in duopoly profit
by introducing {CDN} services?",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "46--48",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381068",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper provides an economic analysis of the
ISP-operated CDN under a duopolistic competition. The
two ISPs are modeled as a platform in a two-sided
market providing Internet access to both content
providers and consumers. By formulating a 4-level
Stackelberg game, we have found that the equilibrium
strategy of an ISP in determining whether to launch CDN
service depends on the marginal cost of cache server
deployment and the two contrary effects: ``Competition
Effect'' and ``Delay Reduction Effect.''",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gulyas:2012:GNF,
author = "Andr{\'a}s Guly{\'a}s and Attila Kor{\"o}si and
D{\'a}vid Szab{\'o} and Gergely Bicz{\'o}k",
title = "On greedy network formation",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "49--52",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381069",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Greedy navigability is a central issue in the theory
of networks. However, the exogenous nature of network
models do not allow for describing how greedy
routable-networks emerge in reality. In turn, network
formation games focus on the very emergence process,
but the applied shortest-path based cost functions
exclude navigational aspects. This paper takes a first
step towards incorporating both emergence (missing in
algorithmic network models) and greedy navigability
(missing in network formation games) into a single
framework, and proposes the Greedy Network Formation
Game. Our first contribution is the game definition,
where we assume a hidden metric space underneath the
network, and, instead of usual shortest path metric, we
use the length of greedy paths as the measure of
communication cost between players. Our main finding is
that greedy-routable small worlds do not emerge on
                 constant dimensional Euclidean grids. This simply means
that the emergence of topologies on which we understood
the principles of greedy forwarding cannot be explained
endogenously. We also present a very brief outlook on
how the situation changes in the hyperbolic space.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ramakrishnan:2012:EIV,
author = "Lavanya Ramakrishnan and R. Shane Canon and Krishna
Muriki and Iwona Sakrejda and Nicholas J. Wright",
title = "Evaluating Interconnect and Virtualization Performance
for High Performance Computing",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "55--60",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381071",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scientists are increasingly considering cloud
computing platforms to satisfy their computational
needs. Previous work has shown that virtualized cloud
environments can have significant performance impact.
However there is still a limited understanding of the
nature of overheads and the type of applications that
might do well in these environments. In this paper we
detail benchmarking results that characterize the
virtualization overhead and its impact on performance.
We also examine the performance of various interconnect
technologies with a view to understanding the
performance impacts of various choices. Our results
show that virtualization can have a significant impact
upon performance, with at least a 60\% performance
penalty. We also show that less capable interconnect
technologies can have a significant impact upon
performance of typical HPC applications. We also
evaluate the performance of the Amazon Cluster compute
instance and show that it performs approximately
equivalently to a 10G Ethernet cluster at low core
counts.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mudalige:2012:PMA,
author = "G. R. Mudalige and M. B. Giles and C. Bertolli and P.
H. J. Kelly",
title = "Predictive modeling and analysis of {OP2} on
distributed memory {GPU} clusters",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "61--67",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381072",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "OP2 is an ``active'' library framework for the
development and solution of unstructured mesh based
applications. It aims to decouple the scientific
specification of an application from its parallel
implementation to achieve code longevity and
near-optimal performance through re-targeting the
backend to different multi-core/many-core hardware.
This paper presents a predictive performance analysis
and benchmarking study of OP2 on heterogeneous cluster
systems. We first present the design of a new OP2
back-end that enables the execution of applications on
distributed memory clusters, and benchmark its
performance during the solution of a 1.5M and 26M
edge-based CFD application written using OP2. Benchmark
systems include a large-scale CrayXE6 system and an
Intel Westmere/InfiniBand cluster. We then apply
performance modeling to predict the application's
performance on an NVIDIA Tesla C2070 based GPU cluster,
enabling us to compare OP2's performance capabilities
on emerging distributed memory heterogeneous systems.
Results illustrate the performance benefits that can be
gained through many-core solutions both on single-node
and heterogeneous configurations in comparison to
traditional homogeneous cluster systems for this class
of applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mateescu:2012:OMT,
author = "Gabriel Mateescu and Gregory H. Bauer and Robert A.
Fiedler",
title = "Optimizing matrix transposes using a {POWER7} cache
model and explicit prefetching",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "68--73",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381073",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider the problem of efficiently computing
matrix transposes on the POWER7 architecture. We
develop a matrix transpose algorithm that uses cache
blocking, cache prefetching and data alignment. We
model the POWER7 data cache and memory concurrency and
use the model to predict the memory throughput of the
proposed matrix transpose algorithm. The performance of
our matrix transpose algorithm is up to five times
higher than that of the {\tt dgetmo} routine of the
Engineering and Scientific Subroutine Library and is
2.5 times higher than that of the code generated by
compiler-inserted prefetching. Numerical experiments
indicate a good agreement between the predicted and the
measured memory throughput.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Danalis:2012:BPH,
author = "Anthony Danalis and Piotr Luszczek and Gabriel Marin
and Jeffrey S. Vetter and Jack Dongarra",
title = "{BlackjackBench}: portable hardware characterization",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "74--79",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381074",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "DARPA's AACE project aimed to develop Architecture
Aware Compiler Environments that automatically
characterizes the hardware and optimizes the
application codes accordingly. We present the
BlackjackBench --- a suite of portable benchmarks that
automate system characterization, plus statistical
analysis techniques for interpreting the results. The
BlackjackBench discovers the effective sizes and speeds
of the hardware environment rather than the often
unattainable peak values. We aim at hardware
characteristics that can be observed by running
standard C codes. We characterize the memory hierarchy,
including cache sharing and NUMA characteristics of the
system, properties of the processing cores affecting
instruction execution speed, and the length of the OS
scheduler time slot. We show how they all could
potentially interfere with each other and how
established classification and statistical analysis
techniques reduce experimental noise and aid automatic
interpretation of results.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tineo:2012:TAA,
author = "Adrian Tineo and Sadaf R. Alam and Thomas C.
Schulthess",
title = "Towards autotuning by alternating communication
methods",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "80--85",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381075",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Interconnects in emerging high performance computing
systems feature hardware support for one-sided,
asynchronous communication and global address space
programming models in order to improve parallel
efficiency and productivity by allowing communication
and computation overlap and out-of-order delivery. In
practice though, complex interactions between the
software stack and the communication hardware make it
challenging to obtain optimum performance for a full
application expressed with a one-sided programming
paradigm. Here, we present a proof-of-concept study for
an autotuning framework that instantiates hybrid
kernels based on refactored codes using available
communication libraries or languages on a Cray XE6 and
a SGI Altix UV 1000. We validate our approach by
improving performance for bandwidth- and latency-bound
kernels of interest in quantum physics and astrophysics
by up to 35\% and 80\% respectively.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Iakymchuk:2012:MPT,
author = "Roman Iakymchuk and Paolo Bientinesi",
title = "Modeling performance through memory-stalls",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "86--91",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381076",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We aim at modeling the performance of linear algebra
algorithms without executing either them or parts of
them. The performance of an algorithm can be expressed
in terms of the time spent on CPU execution and on
memory-stalls. The main concern of this paper is to
build analytical models to accurately predict
memory-stalls. The scenario in which data resides in
the L2 cache is considered; with this assumption, only
L1 cache misses occur. We construct an analytical
formula for modeling the L1 cache misses of fundamental
linear algebra operations such as those included in the
Basic Linear Algebra Subprograms (BLAS) library. The
number of cache misses occurring in higher-level
algorithms ``like a matrix factorization'' is then
predicted by combining the models for the appropriate
BLAS subroutines. As case studies, we consider GER, a
BLAS level-2 operation, and the LU factorization. The
models are validated on both Intel and AMD processors,
attaining remarkably accurate performance
predictions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Shan:2012:PEH,
author = "Hongzhang Shan and Nicholas J. Wright and John Shalf
and Katherine Yelick and Marcus Wagner and Nathan
Wichmann",
title = "A preliminary evaluation of the hardware acceleration
of the {Cray Gemini} interconnect for {PGAS} languages
and comparison with {MPI}",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "92--98",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381077",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The Gemini interconnect on the Cray XE6 platform
provides for lightweight remote direct memory access
(RDMA) between nodes, which is useful for implementing
partitioned global address space (PGAS) languages like
UPC and Co-Array Fortran. In this paper, we perform a
study of Gemini performance using a set of
communication microbenchmarks and compare the
performance of one-sided communication in PGAS
languages with two-sided MPI. Our results demonstrate
the performance benefits of the PGAS model on Gemini
hardware, showing in what circumstances and by how much
one-sided communication outperforms two-sided in terms
of messaging rate, aggregate bandwidth, and computation
and communication overlap capability. For example, for
8-byte and 2KB messages the one-sided messaging rate is
5 and 10 times greater respectively than the two-sided
one. The study also reveals important information about
how to optimize one-sided Gemini communication.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Deshpande:2012:AGC,
author = "Vivek Deshpande and Xing Wu and Frank Mueller",
title = "Auto-generation of communication benchmark traces",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "99--105",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381078",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Benchmarks are essential for evaluating HPC hardware
and software for petascale machines and beyond. But
benchmark creation is a tedious manual process. As a
result, benchmarks tend to lag behind the development
of complex scientific codes. Our work automates the
creation of communication benchmarks. Given an MPI
application, we utilize ScalaTrace, a lossless and
scalable framework to trace communication operations
and execution time while abstracting away the
computations. A single trace file that reflects the
behavior of all nodes is subsequently expanded to C
source code by a novel code generator. This resulting
benchmark code is compact, portable, human-readable,
and accurately reflects the original application's
communication characteristics and performance.
Experimental results demonstrate that generated source
code of benchmarks preserves both the communication
patterns and the run-time behavior of the original
application. Such automatically generated benchmarks
not only shorten the transition from application
development to benchmark extraction but also facilitate
code obfuscation, which is essential for benchmark
extraction from commercial and restricted
applications.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Su:2012:CPB,
author = "ChunYi Su and Dong Li and Dimitrios S. Nikolopoulos
and Matthew Grove and Kirk Cameron and Bronis R. de
Supinski",
title = "Critical path-based thread placement for {NUMA}
systems",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "106--112",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381079",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Multicore multiprocessors use a Non Uniform Memory
Architecture (NUMA) to improve their scalability.
However, NUMA introduces performance penalties due to
remote memory accesses. Without efficiently managing
data layout and thread mapping to cores, scientific
applications may suffer performance loss, even if they
are optimized for NUMA. In this paper, we present
algorithms and a runtime system that optimize the
execution of OpenMP applications on NUMA architectures.
By collecting information from hardware counters, the
runtime system directs thread placement and reduces
performance penalties by minimizing the critical path
of OpenMP parallel regions. The runtime system uses a
scalable algorithm that derives placement decisions
with negligible overhead. We evaluate our algorithms
and the runtime system with four NPB applications
implemented in OpenMP. On average the algorithms
achieve between 8.13\% and 25.68\% performance
improvement, compared to the default Linux thread
placement scheme. The algorithms miss the optimal
thread placement in only 8.9\% of the cases.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lee:2012:BMD,
author = "DongJin Lee and Michael O'Sullivan and Cameron
Walker",
title = "Benchmarking and modeling disk-based storage tiers for
practical storage design",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "113--118",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381080",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper investigates benchmarking and modeling for
a disk-based storage system in order to design and
build a practical storage tier. As a practical case
study, we focus on the design of an archival storage
tier. The archival tiers play a critical role in data
preservation as almost all current data will eventually
be archived and the demands placed on archival tiers
are growing because of large regularly-scheduled
back-ups. Archival tiers usually consist of tape-based
devices with a large storage capacity, but limited I/O
performance for retrieving data, especially when
multiple retrieval requests are made simultaneously. As
the cost of disk-based devices continues to decrease
while the capacity of individual disks increases,
disk-based systems are becoming a more realistic option
for both enterprise and commodity archival storage
tiers. We utilize archival workloads developed from an
analysis of historical data in order to provide
accurate and robust benchmarks of system performance as
an archive. We then embed our practical measurements in
a measurement-driven optimization approach to design an
archival system. Our approach produces a low cost
design for a commodity disk-based archival storage
system. Using our measurement-driven model, ideal
storage building blocks are identified for a real-world
archival tier design.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2012:TEG,
author = "Lingyuan Wang and Miaoqing Huang and Tarek
El-Ghazawi",
title = "Towards efficient {GPU} sharing on multicore
processors",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "119--124",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381081",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Scalable systems employing a mix of GPUs with CPUs are
becoming increasingly prevalent in high-performance
computing. The presence of such accelerators introduces
significant challenges and complexities to both
language developers and end users. This paper provides
a close study of efficient coordination mechanisms to
handle parallel requests from multiple hosts of control
to a GPU under hybrid programming. Using a set of
microbenchmarks and applications on a GPU cluster, we
show that thread and process-based context hosting have
different tradeoffs. Experimental results on
application benchmarks suggest that both thread-based
context funneling and process-based context switching
natively perform similarly on the latest Fermi GPUs,
while manually guided context funneling is currently
the best way to achieve optimal performance.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Sun:2012:APM,
author = "Xian-He Sun and Dawei Wang",
title = "{APC}: a performance metric of memory systems",
journal = j-SIGMETRICS,
volume = "40",
number = "2",
pages = "125--130",
month = sep,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2381056.2381082",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Fri Nov 9 11:06:40 MST 2012",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Due to the infamous ``memory wall'' problem and a
drastic increase in the number of data intensive
applications, memory rather than processor has become
the leading performance bottleneck of modern computing
systems. Evaluating and understanding memory system
performance is increasingly becoming the core of
high-end computing. Conventional memory metrics, such
as miss ratio, average miss latency, average memory
access time, etc., are designed to measure a given
memory performance parameter, and do not reflect the
overall performance of a memory system. On the other
hand, widely used system measurement metrics, such as
IPC and Flops are designed to measure CPU performance,
and do not directly reflect memory performance. In this
paper, we proposed a novel memory metric, Access Per
Cycle (APC), to measure overall memory performance with
consideration of the complexity of modern memory
systems. A unique contribution of APC is its separation
of memory evaluation from CPU evaluation; therefore, it
provides a quantitative measurement of the
``data-intensiveness'' of an application. The concept
of APC is introduced; a constructive investigation
counting the number of data accesses and access cycles
at differing levels of the memory hierarchy is
conducted; finally some important usages of APC are
presented. Simulation results show that APC is
significantly more appropriate than the existing memory
metrics in evaluating modern memory systems.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Vitali:2012:LSO,
author = "Roberto Vitali and Alessandro Pellegrini and Francesco
Quaglia",
title = "Load sharing for optimistic parallel simulations on
multi core machines",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "2--11",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425250",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Parallel Discrete Event Simulation (PDES) is based on
the partitioning of the simulation model into distinct
Logical Processes (LPs), each one modeling a portion of
the entire system, which are allowed to execute
simulation events concurrently. This allows exploiting
parallel computing architectures to speedup model
execution, and to make very large models tractable. In
this article we cope with the optimistic approach to
PDES, where LPs are allowed to concurrently process
their events in a speculative fashion, and
rollback/recovery techniques are used to guarantee state
consistency in case of causality violations along the
speculative execution path. Particularly, we present an
innovative load sharing approach targeted at optimizing
resource usage for fruitful simulation work when
running an optimistic PDES environment on top of
multi-processor/multi-core machines. Beyond providing
the load sharing model, we also define a load sharing
oriented architectural scheme, based on a symmetric
multi-threaded organization of the simulation platform.
Finally, we present a real implementation of the load
sharing architecture within the open source ROme
OpTimistic Simulator (ROOT-Sim) package. Experimental
data for an assessment of both viability and
effectiveness of our proposal are presented as well.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Hahnel:2012:MEC,
author = "Marcus H{\"a}hnel and Bj{\"o}rn D{\"o}bel and Marcus
V{\"o}lp and Hermann H{\"a}rtig",
title = "Measuring energy consumption for short code paths
using {RAPL}",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "13--17",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425252",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Measuring the energy consumption of software
components is a major building block for generating
models that allow for energy-aware scheduling,
accounting and budgeting. Current measurement
techniques focus on coarse-grained measurements of
application or system events. However, fine grain
adjustments in particular in the operating-system
kernel and in application-level servers require power
profiles at the level of a single software function.
Until recently, this appeared to be impossible due to
the lacking fine grain resolution and high costs of
measurement equipment. In this paper we report on our
experience in using the Running Average Power Limit
(RAPL) energy sensors available in recent Intel CPUs
for measuring energy consumption of short code paths.
We investigate the granularity at which RAPL
measurements can be performed and discuss practical
obstacles that occur when performing these measurements
on complex modern CPUs. Furthermore, we demonstrate how
to use the RAPL infrastructure to characterize the
energy costs for decoding video slices.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Mazzucco:2012:EEP,
author = "Michele Mazzucco and Isi Mitrani",
title = "Empirical evaluation of power saving policies for data
centers",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "18--22",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425253",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It has been suggested that the conflicting objectives
of high performance and low power consumption in a
service center can be met by powering a block of
servers on and off, in response to changing demand
conditions. To test that proposition, a dynamic
operating policy is evaluated in a real-life setting,
using the Amazon EC2 cloud platform. The application
running on the cluster is a replica of the English
edition of Wikipedia, with different streams of
requests generated by reading traces from a file and by
means of random numbers with a given mean and squared
coefficient of variation. The system costs achieved by
an ``optimized'' version of the policy are compared to
those of a simple heuristic and also to a baseline
policy consisting of keeping all servers powered on all
the time and one where servers are re-allocated
periodically but reserves are not employed.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ghumre:2012:ENC,
author = "Pooja Ghumre and Junwei Li and Mukil Kesavan and Ada
Gavrilovska and Karsten Schwan",
title = "Evaluating the need for complexity in energy-aware
management for cloud platforms",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "23--27",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425254",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In order to curtail the continuous increase in power
consumption of modern datacenters, researchers are
responding with sophisticated energy-aware workload
management methods. This increases the complexity and
cost of the management operation, and may lead to
increases in failure rates. The goal of this paper is
to illustrate that there exists considerable diversity
in the effectiveness of different, potentially
``smarter'' workload management methods depending on the
target metric or the characteristics of the workload
being managed. We conduct experiments on a datacenter
prototype platform, virtualized with the VMware vSphere
software, and using representative cloud applications
--- a distributed key-value store and a map-reduce
computation. We observe that, on our testbed, different
workload placement decisions may be quite effective for
some metrics, but may lead to only marginal impact on
others. In particular, we are considering the impact on
energy-related metrics, such as power or temperature,
as corresponding energy-aware management methods
typically come with greater complexity due to the fact that
they must consider the complex energy consumption
trends of various components in the cloud
infrastructure. We show that for certain applications,
such costs can be avoided, as different management
policies and placement decisions have marginal impact
on the target metric. The objective is to understand
whether for certain classes of applications, and/or
application configurations, it is necessary to incur,
or if it is beneficial to avoid, the use of complex
management methods.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Gast:2012:OSP,
author = "Nicolas Gast and Dan-Cristian Tomozei and Jean-Yves
{Le Boudec}",
title = "Optimal storage policies with wind forecast
uncertainties",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "28--32",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425255",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The increase in penetration of wind in the current
energy mix is hindered by its high volatility and poor
predictability. These shortcomings lead to energy loss
and increased deployment of fast ramping generation.
The use of energy storage compensates to some extent
these negative effects; it plays a buffer role between
demand and production. We revisit a model of real
storage proposed by Bejan et al. [1]. We study the
impact on performance of energy conversion efficiency
and of wind prediction quality. Specifically, we
provide theoretical bounds on the trade-off between
energy loss and fast ramping generation, which we show
to be tight for large capacity of the available
storage. Moreover, we develop strategies that
outperform the proposed fixed level policies when
evaluated on real data from the UK grid.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bernstein:2012:SAP,
author = "Andrey Bernstein and Daniel Bienstock and David Hay
and Meric Uzuno{\u{g}}lu and Gil Zussman",
title = "Sensitivity analysis of the power grid vulnerability
to large-scale cascading failures",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "33--37",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425256",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This paper revisits models of cascading failures in
the transmission system of the power grid. It has been
recently shown that since power flows are governed by
the laws of physics, these models significantly differ
from epidemic/percolation-based models. Yet, while some
numerical results have been recently obtained based on
these models, there is a need to investigate the
sensitivity of the results to various parameters and to
evaluate the models' accuracy. In this paper, through
numerical experiments with real grid data, we study the
effects of geographically correlated outages and the
resulting cascades. We consider a wide range of
parameters, such as the power lines' Factor of Safety
and the sensitivity of the lines to power flow spikes.
Moreover, we compare our numerical results to the
actual events in a recent blackout in the San Diego
area (Sept. 2011), thereby demonstrating that the
model's predictions are consistent with real events.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardakanian:2012:RDC,
author = "O. Ardakanian and C. Rosenberg and S. Keshav",
title = "{RealTime} distributed congestion control for
electrical vehicle charging",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "38--42",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425257",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The significant load and unpredictable mobility of
electric vehicles (EVs) makes them a challenge for grid
distribution systems. Unlike most current approaches to
control EV charging, which construct optimal charging
schedules by predicting EV state of charge and future
behaviour, we leverage the anticipated widespread
deployment of measurement and control points to propose
an alternative vision. In our approach, drawing from a
comparative analysis of Internet and distribution grid
congestion, control actions taken by a charger every
few milliseconds in response to congestion signals
allow it to rapidly reduce its charging rate to avoid
grid congestion. We sketch three control schemes that
embody this vision and compare their relative merits
and demerits.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Ardakanian:2012:ISR,
author = "Omid Ardakanian and Catherine Rosenberg and S.
Keshav",
title = "On the impact of storage in residential power
distribution systems",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "43--47",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425258",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "It is anticipated that energy storage will be
incorporated into the distribution network component of
the future smart grid to allow desirable features such
as distributed generation integration and reduction in
the peak demand. There is, therefore, an urgent need to
understand the impact of storage on distribution system
planning. In this paper, we focus on the effect of
storage on the loading of neighbourhood pole-top
transformers. We apply a probabilistic sizing technique
originally developed for sizing buffers and
communication links in telecommunications networks to
jointly size storage and transformers in the
distribution network. This allows us to compute the
potential gains from transformer upgrade deferral due
to the addition of storage. We validate our results
through numerical simulation using measurements of home
load in a testbed of 20 homes and demonstrate that our
guidelines allow local distribution companies to defer
transformer upgrades without reducing reliability.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Chiu:2012:EGB,
author = "David Chiu and Christopher Stewart and Bart McManus",
title = "Electric grid balancing through low-cost workload
migration",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "48--52",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425259",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Energy production must continuously match demand on
the electric grid. A deficiency can lead to service
disruptions, and a surplus can place tremendous stress
on grid components, potentially causing major
blackouts. To manage this balance, grid operators must
increase or lower power generation, with only a few
minutes to react. The grid balancing problem has also
impeded the pace of integrating bountiful renewable
resources (e.g., wind), whose generation is
intermittent. An emerging plan to mitigate this problem
is demand response, i.e., for grid operators to alter
the electricity usage behavior of the masses through
real-time price signals. But due to prohibitively high
infrastructure costs and societal-scale adoption,
tangible demand response mechanisms have so far been
elusive. We believe that altering the usage patterns of
a multitude of data centers can be a tangible, albeit
initial, step towards affecting demand response.
Growing in both density and size, today's data center
designs are shaped by the increasing awareness of
energy costs and carbon footprint. We posit that
shifting computational workloads (and thus, demand)
across geographic regions to match electricity supply
may help balance the grid. In this paper we will first
present a real grid balancing problem experienced in
the Pacific Northwest. We then propose a symbiotic
relationship between data centers and grid operators by
showing that mutual cost benefits can be accessible.
Finally, we argue for a low cost workload migration
mechanism, and pose overarching challenges in designing
this framework.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Menasche:2012:SAP,
author = "Daniel S. Menasch{\'e} and Rosa Maria Meri Le{\"a}o
and Edmundo {de Souza e Silva} and Alberto Avritzer and
Sindhu Suresh and Kishor Trivedi and Raymond A. Marie
and Lucia Happe and Anne Koziolek",
title = "Survivability analysis of power distribution in smart
grids with active and reactive power modeling",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "53--57",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425260",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Coffman:2012:UDA,
author = "E. G. {Coffman, Jr.} and Y. Kogan and W. Lai and V.
Ramaswami",
title = "Uptime and downtime analysis for hierarchical
redundant systems in telecommunications",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "59--61",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425262",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider non-degradable hierarchical redundant
systems having multiple working and failure modes with
restoration time depending on failure type. We evaluate
these systems using two measures: generalized uptime
and traditional downtime. We define the Impact Weighted
System Uptime (IWSU) and illustrate its usefulness in
practical terms, viz., an IP router. Next, we provide
an analysis that fits the downtimes by a heavy-tailed
log PH distribution. For these downtime distributions,
we study whether it is more cost effective to reduce
failure rates or to speed up the response to failures.
The first option is a vendor problem, but the second is
a service provider problem. A numerical example is
given to help appreciate the tradeoff.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Avrachenkov:2012:OCC,
author = "K. Avrachenkov and U. Ayesta and J. Doncel and P.
Jacko",
title = "Optimal congestion control of {TCP} flows for
{Internet} routers",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "62--64",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425263",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work we address the problem of fast and fair
transmission of flows in a router, which is a
fundamental issue in networks like the Internet. We
model the interaction between a TCP source and a
bottleneck queue with the objective of designing
optimal packet admission controls in the bottleneck
queue. We focus on the relaxed version of the problem
obtained by relaxing the fixed buffer capacity
constraint that must be satisfied at all time epochs.
The relaxation allows us to reduce the multi-flow problem
into a family of single-flow problems, for which we can
analyze both theoretically and numerically the
existence of optimal control policies of special
structure. In particular, we show that for a variety of
parameters, TCP flows can be optimally controlled in
routers by so-called index policies. We have
implemented index policies in Network Simulator-3
(NS-3) and compared its performance with DropTail and
RED buffers. The simulation results show that the index
policy has several desirable properties with respect to
fairness and efficiency.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Schorgendorfer:2012:TLB,
author = "Angela Sch{\"o}rgendorfer and Peter M. van de Ven and
Bo Zhang",
title = "Temporal load balancing for distributed backup
scheduling",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "65--67",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425264",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Rochman:2012:ERM,
author = "Yuval Rochman and Hanoch Levy and Eli Brosh",
title = "Efficient replication in multi-regional peer-supported
{VoD} systems",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "68--70",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425265",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Borgs:2012:PQ,
author = "Christian Borgs and Jennifer T. Chayes and Sherwin
Doroudi and Mor Harchol-Balter and Kuang Xu",
title = "Pricing and queueing",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "71--73",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425266",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider pricing in a single observable queue,
where customers all have the same valuation, V, and
the same waiting cost, v. It is known that earning rate
is maximized in such a model when state-dependent
pricing is used and an admissions threshold is deployed
whereby arriving customers may not join the queue if
the total number of customers exceeds this threshold.
This paper is the first to explicitly derive the
optimal threshold. We use our explicit formulation to
obtain asymptotic results on how the threshold grows
with V.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Godtschalk:2012:SBR,
author = "Antonie S. Godtschalk and Florin Ciucu",
title = "Stochastic bounds for randomized load balancing",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "74--76",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425267",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Randomized load balancing is a cost efficient policy
for job scheduling in parallel server queueing systems
whereby, with every incoming job, a central dispatcher
randomly polls some servers and selects the one with
the smallest queue. By exactly deriving the jobs' delay
distribution in such systems, in explicit and closed
form, Mitzenmacher [5] proved the so-called
`power-of-two' result, which states that by randomly
polling only two servers yields an exponential
improvement in delay over randomly selecting a single
server. Such a fundamental result, however, was
obtained in an asymptotic regime in the total number of
servers, and does not necessarily provide accurate
estimates for practical finite regimes with small or
moderate number of servers. In this paper we obtain
stochastic lower and upper bounds on the jobs' average
delay in non-asymptotic regimes, by borrowing ideas for
analyzing the particular case of the
Join-the-Shortest-Queue (JSQ) policy. Numerical
illustrations indicate not only that the obtained
bounds are remarkably accurate, but also that the
existing exact but asymptotic results can be largely
misleading in some finite regimes.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Myers:2012:EQL,
author = "Daniel S. Myers and Mary K. Vernon",
title = "Estimating queue length distributions for queues with
random arrivals",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "77--79",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425268",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "This work develops an accurate and efficient
two-moment approximation for the queue length
distribution in the M/G/1 queue. Queue length
distributions can provide insight into the impact of
system design changes that go beyond simple averages,
but conventional queueing theory lacks efficient
techniques for estimating the long-run queue length
distribution when service times are not exponential.
The approximate queue lengths depend on only the first
and second moments of the service time rather than the
full service time distribution, resulting in a model
that is applicable to a wide variety of systems.
Validation results show that the new approximation is
highly accurate for light-tailed service time
distributions. Work in progress includes developing
accurate approximations for multi-server queues and
heavy-tailed service distributions.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Cremonesi:2012:MRT,
author = "Paolo Cremonesi and Andrea Sansottera",
title = "Modeling response times in the {Google ROADEF\slash
EURO} challenge",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "80--82",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425269",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this paper, we extend the machine reassignment
model proposed by Google for the ROADEF/EURO Challenge.
The aim of the challenge is to develop algorithms for
the efficient solutions of data-center consolidation
problems. The problem stated in the challenge mainly
focus on dependability requirements and does not take
into account performance requirements (end-to-end
response times). We extend the Google problem
definition by modeling and constraining end-to-end
response times. We provide experimental results to show
the effectiveness of this extension.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tan:2012:PLSb,
author = "Yue Tan and Yingdong Lu and Cathy H. Xia",
title = "Provisioning for large scale loss network systems with
applications in cloud computing",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "83--85",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425270",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Pal:2012:CCT,
author = "Ranjan Pal and Pan Hui",
title = "{CyberInsurance} for cybersecurity a topological take
on modulating insurance premiums",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "86--88",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425271",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "A recent conjecture in cyber-insurance research states
that for compulsory monopolistic insurance scenarios,
charging fines and rebates on fair premiums will
incentivize network users to invest in self-defense
investments, thereby making cyber-space more robust.
Assuming the validity of the conjecture in this paper,
we adopt a topological perspective in proposing a
mechanism that accounts for (i) the positive
externalities posed (through self-defense investments)
by network users on their peers, and (ii) network
location (based on centrality measures) of users, and
provides an appropriate way to proportionally allocate
fines/rebates on user premiums. We mathematically
justify (via a game-theoretic analysis) that optimal
fine/rebates per user should be allocated in proportion
to the Bonacich or eigenvector centrality value of the
user.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Elahi:2012:MFD,
author = "Maryam Elahi and Carey Williamson and Philipp
Woelfel",
title = "Meeting the fairness deadline in speed scaling
systems: is turbocharging enough?",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "89--91",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425272",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "In this work, we explore the notion of 'turbocharging'
in speed scaling systems, and ask whether this is
sufficient to preserve the strong dominance property of
FSP over PS. The answer turns out to be no, but the
analysis yields useful insights into the design of
speed scaling systems that can outperform PS in
response time, energy consumption, or perhaps both.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bachmat:2012:ASQ,
author = "Eitan Bachmat and Assaf Natanzon",
title = "Analysis of {SITA} queues with many servers and
spacetime geometry",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "92--94",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425273",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Bonald:2012:RSS,
author = "Thomas Bonald and Davide Cuda",
title = "{Rate-optimal} scheduling schemes for asynchronous
{input-queued} packet switches",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "95--97",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425274",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The performance of input-queued packet switches
critically depends on the scheduling scheme that
connects the input ports to the output ports. We show
that, when packets are switched asynchronously, simple
scheduling schemes where contention is solved locally
at each input or output can achieve rate optimality,
without any speed-up of the internal transmission
rate.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lin:2012:OOS,
author = "Minghong Lin and Adam Wierman and Alan Roytman and
Adam Meyerson and Lachlan L. H. Andrew",
title = "Online optimization with switching cost",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "98--100",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425275",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider algorithms for ``smoothed online convex
optimization (SOCO)'' problems. SOCO is a variant of
the class of ``online convex optimization (OCO)''
problems that is strongly related to the class of
``metrical task systems'', each of which have been
studied extensively. Prior literature on these problems
has focused on two performance metrics: regret and
competitive ratio. There exist known algorithms with
sublinear regret and known algorithms with constant
competitive ratios; however no known algorithms achieve
both. In this paper, we show that this is due to a
fundamental incompatibility between regret and the
competitive ratio --- no algorithm (deterministic or
randomized) can achieve sublinear regret and a constant
competitive ratio, even in the case when the objective
functions are linear.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Blaszczyszyn:2012:FVW,
author = "B. Blaszczyszyn and K. Gaurav",
title = "Far-out vertices in weighted repeated configuration
model",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "100--103",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425276",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We consider an edge-weighted uniform random graph with
a given degree sequence (Repeated Configuration Model)
which is a useful approximation for many real-world
networks. It has been observed that the vertices which
are separated from the rest of the graph by a distance
exceeding certain threshold play an important role in
determining some global properties of the graph like
diameter, flooding time etc., in spite of being
statistically rare. We give a convergence result for
the distribution of the number of such far-out
vertices. We also make a conjecture about how this
relates to the longest edge of the minimal spanning
tree on the graph under consideration.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Papadopoulos:2012:RGG,
author = "Fragkiskos Papadopoulos and Constantinos Psomas and
Dmitri Krioukov",
title = "Replaying the geometric growth of complex networks and
application to the {AS Internet}",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "104--106",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425277",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Tizghadam:2012:NCV,
author = "Ali Tizghadam and Weiwei Li and Alberto Leon-Garcia",
title = "Network criticality in vehicular networks",
journal = j-SIGMETRICS,
volume = "40",
number = "3",
pages = "107--109",
month = dec,
year = "2012",
CODEN = "????",
DOI = "https://doi.org/10.1145/2425248.2425278",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:20 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Network criticality (resistance distance) is a
graph-theoretic metric that quantifies network
robustness, and that was originally designed to capture
the effect of environmental changes in core
communication networks. This paper establishes a
relationship between information centrality and network
criticality and provides a justification for using the
average network criticality of a node to quantify the
node's relative importance in a graph. This result
provides a basis for designing robust clustering
algorithms for vehicular networks.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Lui:2013:SPC,
author = "John C. S. Lui and Li Zhang",
title = "A study of pricing for cloud resources",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "3--12",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479944",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "We present a study of pricing cloud resources in this
position paper. Our objective is to explore and
understand the interplay between economics and systems
designs proposed by recent research. We develop a
general model that captures the resource needs of
various applications and usage pricing of cloud
computing. We show that a uniform price does not suffer
any revenue loss compared to first-order price
discrimination. We then consider alternative strategies
that a provider can use to improve revenue, including
resource throttling and performance guarantees, enabled
by recent technical developments. We prove that
throttling achieves the maximum revenue at the expense
of tenant surplus, while providing performance
guarantees with an extra fee is a fairer solution for
both parties. We further extend the model to
incorporate the cost aspect of the problem, and the
possibility of right-sizing capacity. We reveal another
interesting insight that in some cases, instead of
focusing on right-sizing, the provider should work on
the demand and revenue side of the equation, and
pricing is a more feasible and simpler solution. Our
claims are evaluated through extensive trace-driven
simulations with real-world workloads.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Zhang:2013:SCI,
author = "Zhizhong Zhang and Chuan Wu and David W. L. Cheung",
title = "A survey on cloud interoperability: taxonomies,
standards, and practice",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "13--22",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479945",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "Cloud computing is a new computing paradigm that
allows users with different computing demands to access
a shared pool of configurable computing resources
(e.g., servers, network, storage, database,
applications and services). Many commercial cloud
providers have emerged in the past 6-7 years, and each
typically provides its own cloud infrastructure, APIs
and application description formats to access the cloud
resources, as well as support for service level
agreements (SLAs). Such vendor lock-in has seriously
limited the flexibility that cloud end users would like
to process, when it comes to deploy applications over
different infrastructures in different geographic
locations, or to migrate a service from one provider's
cloud to another. To enable seamless sharing of
resources from a pool of cloud providers, efforts have
emerged recently to facilitate cloud interoperability,
i.e., the ability for multiple cloud providers to work
together, from both the industry and academia. In this
article, we conduct a comprehensive survey on the
state-of-the-art efforts, with a focus on
interoperability among different IaaS (infrastructure
as a service) cloud platforms. We investigate the
existing studies on taxonomies and standardization of
cloud interoperability, as well as practical cloud
technologies from both the cloud provider's and user's
perspectives to enable interoperation. We pose issues
and challenges to advance the topic area, and hope to
pave a way for the forthcoming research.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Yang:2013:FPE,
author = "Lei Yang and Jiannong Cao and Yin Yuan and Tao Li and
Andy Han and Alvin Chan",
title = "A framework for partitioning and execution of data
stream applications in mobile cloud computing",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "23--32",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479946",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "The contribution of cloud computing and mobile
computing technologies lead to the newly emerging
mobile cloud computing paradigm. Three major approaches
have been proposed for mobile cloud applications: (1)
extending the access to cloud services to mobile
devices; (2) enabling mobile devices to work
collaboratively as cloud resource providers; (3)
augmenting the execution of mobile applications on
portable devices using cloud resources. In this paper,
we focus on the third approach in supporting mobile
data stream applications. More specifically, we study
how to optimize the computation partitioning of a data
stream application between mobile and cloud to achieve
maximum speed/throughput in processing the streaming
data. To the best of our knowledge, it is the first
work to study the partitioning problem for mobile data
stream applications, where the optimization is placed
on achieving high throughput of processing the
streaming data rather than minimizing the makespan of
executions as in other applications. We first propose a
framework to provide runtime support for the dynamic
computation partitioning and execution of the
application. Different from existing works, the
framework not only allows the dynamic partitioning for
a single user but also supports the sharing of
computation instances among multiple users in the cloud
to achieve efficient utilization of the underlying
cloud resources. Meanwhile, the framework has better
scalability because it is designed on the elastic cloud
fabrics. Based on the framework, we design a genetic
algorithm for optimal computation partition. Both
numerical evaluation and real world experiment have
been performed, and the results show that the
partitioned application can achieve at least two times
better performance in terms of throughput than the
application without partitioning.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
@Article{Wang:2013:TOA,
author = "Weina Wang and Kai Zhu and Lei Ying and Jian Tan and
Li Zhang",
title = "A throughput optimal algorithm for map task scheduling
in {MapReduce} with data locality",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "33--42",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479947",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
abstract = "MapReduce/Hadoop framework has been widely used to
process large-scale datasets on computing clusters.
Scheduling map tasks to improve data locality is
crucial to the performance of MapReduce. Many works
have been devoted to increasing data locality for
better efficiency. However, to the best of our
knowledge, fundamental limits of MapReduce computing
clusters with data locality, including the capacity
region and throughput optimal algorithms, have not been
studied. In this paper, we address these problems from
a stochastic network perspective. Our focus is to
strike the right balance between data-locality and
load-balancing to maximize throughput. We present a new
queueing architecture and propose a map task scheduling
algorithm constituted by the Join the Shortest Queue
policy together with the MaxWeight policy. We identify
an outer bound on the capacity region, and then prove
that the proposed algorithm can stabilize any arrival
rate vector strictly within this outer bound. It shows
that the algorithm is throughput optimal and the outer
bound coincides with the actual capacity region. The
proofs in this paper deal with random processing time
with different parameters and nonpreemptive tasks,
which differentiate our work from many other works, so
the proof technique itself is also a contribution of
this paper.",
acknowledgement = ack-nhfb,
ajournal = "Perform. Eval. Rev.",
fjournal = "ACM SIGMETRICS Performance Evaluation Review",
journal-URL = "https://dl.acm.org/loi/sigmetrics",
}
%%% Huang & Lee, SIGMETRICS Perform. Eval. Rev. 40(4):43--52, March 2013.
%%% Experimental study of "cascade attacks": a malicious co-resident VM
%%% exhausts one hardware resource type and thereby induces cascading
%%% performance interference on other resource types; four attack
%%% implementations are evaluated on the Xen virtualization platform.
%%% NOTE(review): entry verified against file conventions (quoted field
%%% values, resolver-prefixed DOI, CODEN = "????", month macro) -- all
%%% consistent with the rest of this bibliography; data left unchanged.
@Article{Huang:2013:ESC,
  author =       "Qun Huang and Patrick P. C. Lee",
  title =        "An experimental study of cascading performance
                 interference in a virtualized environment",
  journal =      j-SIGMETRICS,
  volume =       "40",
  number =       "4",
  pages =        "43--52",
  month =        mar,
  year =         "2013",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/2479942.2479948",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  ISSN-L =       "0163-5999",
  bibdate =      "Sun May 5 09:58:21 MDT 2013",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In a consolidated virtualized environment, multiple
                 virtual machines (VMs) are hosted atop a shared
                 physical substrate. They share the underlying hardware
                 resources as well as the software virtualization
                 components. Thus, one VM can generate performance
                 interference to another co-resident VM. This work
                 explores the adverse impact of performance interference
                 from a security perspective. We present a new class of
                 attacks, namely the cascade attacks, in which an
                 adversary seeks to generate performance interference
                 using a malicious VM. One distinct property of the
                 cascade attacks is that when the malicious VM exhausts
                 one type of hardware resources, it will bring
                 ``cascading'' interference to another type of hardware
                 resources. We present four different implementations of
                 cascade attacks and evaluate their effectiveness atop
                 the Xen virtualization platform. We show that a victim
                 VM can see significant performance degradation (e.g.,
                 throughput drops in network and disk I/Os) due to the
                 cascade attacks.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
@Article{Singh:2013:AMW,
author = "Rahul Singh and Prashant Shenoy and Maitreya Natu and
Vaishali Sadaphal and Harrick Vin",
title = "Analytical modeling for what-if analysis in complex
cloud computing applications",
journal = j-SIGMETRICS,
volume = "40",
number = "4",
pages = "53--62",
month = mar,
year = "2013",
CODEN = "????",
DOI = "https://doi.org/10.1145/2479942.2479949",
ISSN = "0163-5999 (print), 1557-9484 (electronic)",
ISSN-L = "0163-5999",
bibdate = "Sun May 5 09:58:21 MDT 2013",
bibsource = "https://www.math.utah