%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.58",
%%%     date            = "09 September 2024",
%%%     time            = "16:51:01 MDT",
%%%     filename        = "sigmetrics.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "https://www.math.utah.edu/~beebe",
%%%     checksum        = "42451 130063 711262 6604437",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "ACM SIGMETRICS Performance Evaluation Review;
%%%                        BibTeX; bibliography; data base; database",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a BibTeX bibliography for ACM
%%%                        SIGMETRICS Performance Evaluation Review,
%%%                        the newsletter of the ACM Special Interest
%%%                        Group for the computer/communication system
%%%                        performance community.
%%%
%%%                        The journal has a World Wide Web site at
%%%
%%%                            http://www.acm.org/sigmetrics/
%%%                            http://www.sigmetrics.org/
%%%
%%%                        with issue tables of contents at
%%%
%%%                            https://dl.acm.org/loi/sigmetrics
%%%
%%%                        At version 1.58, the COMPLETE year coverage
%%%                        looked like this:
%%%
%%%                             1972 (   6)    1990 (  59)    2008 ( 103)
%%%                             1973 (  13)    1991 (  49)    2009 (  87)
%%%                             1974 (  27)    1992 (  52)    2010 (  96)
%%%                             1975 (  12)    1993 (  45)    2011 ( 127)
%%%                             1976 (  25)    1994 (  39)    2012 ( 160)
%%%                             1977 (  13)    1995 (  50)    2013 ( 108)
%%%                             1978 (  34)    1996 (  30)    2014 ( 143)
%%%                             1979 (  38)    1997 (  38)    2015 ( 119)
%%%                             1980 (  48)    1998 (  56)    2016 (  97)
%%%                             1981 (  91)    1999 (  61)    2017 ( 128)
%%%                             1982 (  50)    2000 (  63)    2018 ( 160)
%%%                             1983 (   0)    2001 (  88)    2019 (  64)
%%%                             1984 (  33)    2002 (  67)    2020 (  69)
%%%                             1985 (  31)    2003 (  35)    2021 ( 111)
%%%                             1986 (  34)    2004 (  90)    2022 (  48)
%%%                             1987 (  30)    2005 (  90)    2023 ( 110)
%%%                             1988 (  31)    2006 (  79)    2024 (  91)
%%%                             1989 (  28)    2007 ( 101)
%%%
%%%                             Article:       3457
%%%
%%%                             Total entries: 3457
%%%
%%%                        This bibliography was initially built from
%%%                        searches in the ACM Portal database.
%%%
%%%                        Spelling has been verified with the UNIX
%%%                        spell and GNU ispell programs using the
%%%                        exception dictionary stored in the companion
%%%                        file with extension .sok.
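%%%
%%%                        A typical check (an assumption for
%%%                        illustration; the exact invocation used
%%%                        here is not recorded) would name the
%%%                        exception dictionary as the personal
%%%                        word list:
%%%
%%%                            ispell -p ./sigmetrics.sok sigmetrics.bib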
%%%
%%%                        BibTeX citation tags are uniformly chosen as
%%%                        name:year:abbrev, where name is the family
%%%                        name of the first author or editor, year is a
%%%                        4-digit number, and abbrev is a 3-letter
%%%                        condensation of important title words.
%%%                        Citation labels were automatically generated
%%%                        by software developed for the BibNet Project.
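%%%
%%%                        For example, the entry below for W. C.
%%%                        Lynch's 1972 article ``Do disk arms
%%%                        move?'' carries the tag
%%%
%%%                            Lynch:1972:DDA
%%%
%%%                        condensing the title words Do, Disk, and
%%%                        Arms.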
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, with the help of
%%%                        ``bibsort -byvolume''.  The bibsort utility
%%%                        is available from ftp.math.utah.edu in
%%%                        /pub/tex/bib.
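%%%
%%%                        A typical filter-style invocation (an
%%%                        assumption; the exact calling convention
%%%                        may differ between versions) would be
%%%
%%%                            bibsort -byvolume < sigmetrics.bib > sigmetrics.sorted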
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
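%%%
%%%  (For version 1.58, the checksum field above thus records CRC-16
%%%  checksum 42451 for a file of 130063 lines, 711262 words, and
%%%  6604437 characters.)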
%%% ====================================================================
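%%% The @Preamble below supplies an (intentionally empty) extra
%%% \hyphenation exception list, plus fallback definitions of the
%%% \circled and \reg markup macros for styles and documents that do
%%% not already define them.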
@Preamble{
    "\hyphenation{ }" #
    "\ifx \undefined \circled \def \circled #1{(#1)}\fi" #
    "\ifx \undefined \reg \def \reg {\circled{R}}\fi"
}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|https://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-SIGMETRICS            = "ACM SIGMETRICS Performance Evaluation Review"}

%%% ====================================================================
%%% Publishers and their addresses:
@String{pub-ACM                 = "ACM Press"}
%%%
@String{pub-ACM:adr             = "New York, NY 10036, USA"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Keirstead:1972:STC,
  author =       "Ralph E. Keirstead and Donn B. Parker",
  title =        "Software testing and certification",
  journal =      j-SIGMETRICS,
  volume =       "1",
  number =       "1",
  pages =        "3--8",
  month =        mar,
  year =         "1972",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041596.1041597",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:42 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Society needs a continuous flow of upgrading products
                 and services which are responsive to needs, are
                 reliable, cost-effective and safe. When this does not
                 occur, excessive regulation and resulting stifled
                 technology and production results. Excesses in both
                 directions have occurred in other fields such as
                 medicine, the automobile industry, petro-chemicals,
                 motion pictures, building construction and
                 pharmaceuticals. Disasters based on poor design and
                 implementation in information processing have occurred
                 in ballot-counting systems, law enforcement systems,
                 billing systems, credit systems and dating services.
                 Business has been undersold and oversold and sometimes
                 reached the brink of ruin in its increasing reliance on
                 computer systems. The only answer is a balanced degree
                 of self-regulation. Such self-regulation for software
                 systems is presented here.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bell:1972:CME,
  author =       "Thomas E. Bell",
  title =        "Computer measurement and evaluation: artistry, or
                 science?",
  journal =      j-SIGMETRICS,
  volume =       "1",
  number =       "2",
  pages =        "4--10",
  month =        jun,
  year =         "1972",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1113640.1113641",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Effort invested in computer measurement and evaluation
                 is clearly increasing, but the results of this
                 increasing investment may be unfortunate. The
                 undeniable value of the results and the enthusiasm of
                 participants may be leading to unrealizable
                 expectations. The present artistry needs to be
                 converted into a science for achieving a solid future;
                 the most fruitful direction may be the synthesis of
                 individual, empirical discoveries combined with testing
                 hypotheses about performance relationships.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Palme:1972:BGM,
  author =       "Jacob Palme",
  title =        "Beware of the {Gibson} mix",
  journal =      j-SIGMETRICS,
  volume =       "1",
  number =       "2",
  pages =        "10--11",
  month =        jun,
  year =         "1972",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1113640.1113642",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Evaluation of computer systems is sometimes made using
                 a so-called Gibson mix. This is a list of common
                 machine instructions with weights depending on how
                 often they are supposed to occur in typical programs.
                 By using these weights to estimate the mean instruction
                 execution time, the `speed' of a computer system is
                 supposed to be measured.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Johnson:1972:SST,
  author =       "Robert R. Johnson",
  title =        "Some steps toward an information system performance
                 theory",
  journal =      j-SIGMETRICS,
  volume =       "1",
  number =       "3",
  pages =        "4--15",
  month =        sep,
  year =         "1972",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041599.1041600",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A means for representing information handling systems
                 at the problem, program, and computer level is
                 presented. This means, Petri Nets, coupled with
                 classical information theory, provides quantitative
                 measures of system capacity and thruput as well as
                 measures of `the work done.' Concepts of
                 information-capacity and of information-work are
                 derived from these probabilistically labeled Petri Nets
                 based on analogies to thermodynamics. Thruput is
                 measured as information-gain. Comments are made about
                 the possible significance of these concepts, their
                 relationship to classical thermodynamics, and the
                 directions of continuing thought stimulated by these
                 concepts.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kernighan:1972:CAO,
  author =       "B. W. Kernighan and P. J. Plauger and D. J. Plauger",
  title =        "On comparing apples and oranges, or, my machine is
                 better than your machine",
  journal =      j-SIGMETRICS,
  volume =       "1",
  number =       "3",
  pages =        "16--20",
  month =        sep,
  year =         "1972",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041599.1041601",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In a recent comparison test, six computer
                 manufacturers were asked to code a particular program
                 loop to run as quickly as possible on their machine.
                 Presumably conclusions about the merits of the machines
                 were to be drawn from the resulting code. We have
                 reduced the number of instructions for the loop by an
                 average of one instruction per machine, a 15\%
                 decrease. It appears that conclusions might more
                 appropriately be drawn about manufacturers' software.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lynch:1972:DDA,
  author =       "W. C. Lynch",
  title =        "Do disk arms move?",
  journal =      j-SIGMETRICS,
  volume =       "1",
  number =       "4",
  pages =        "3--16",
  month =        dec,
  year =         "1972",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041603.1041604",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:54 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Measurement of the lengths of disk arm movements in a
                 2314 disk storage facility of an IBM 360/67 operating
                 under the Michigan Terminal System yielded the
                 unexpected data that the arms need not move in 63\% of
                 the accesses and need move for an average of only 30 ms.
                 in the remaining 37\% of the cases. A description and
                 analysis of a possible mechanism of action is
                 presented. The predictions of this model do not
                 disagree with the measured data.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Halstead:1973:LLM,
  author =       "M. H. Halstead",
  title =        "Language level, a missing concept in information
                 theory",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "1",
  pages =        "7--9",
  month =        mar,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041606.1041607",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "According to Information Theory, [Cf Leon Brillouin,
                 Science and Information Theory, Academic Press, N. Y.
                 1956, pp. 292-3], the information content of a table of
                 numbers does not depend upon how difficult it was to
                 obtain the entries in the table, but only upon whether
                 or not we know how, or how precisely we know how, to
                 reconstruct the entire table or any parts of it.
                 Consequently, from present Information Theory, since we
                 `know in advance' how a table of sines is constructed,
                 such a table contains absolutely no information. For a
                 person who does not `know in advance' how to construct
                 a table of sines, however, the table would indeed
                 contain `Information.' This ambiguity apparently
                 contradicts the basic statement [Leon Brillouin, op.
                 cit., page 10] that `Information is an absolute
                 quantity which has the same numerical value for any
                 observer,' a contradiction which remains even when we
                 accept Brillouin's next statement that `The human value
                 of the information, on the other hand, would
                 necessarily be a relative quantity, and would have
                 different values for different observers, according to
                 the possibility of their understanding it and using it
                 later.'",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Halstead:1973:EDP,
  author =       "M. H. Halstead",
  title =        "An experimental determination of the `purity' of a
                 trivial algorithm",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "1",
  pages =        "10--15",
  month =        mar,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041606.1041608",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Recent work in an area which might be designated as
                 Software Physics [1,2,3,4,5,6] has suggested that the
                 basic structure of algorithms may offer an interesting
                 field for experimental research. Such an experiment is
                 reported here. In an earlier paper [2], it was
                 suggested that a `Second Law' might be stated as: `The
                 internal quality, LV, of a pure algorithm is
                 independent of the language in which it is
                 expressed.'",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Denning:1973:RSC,
  author =       "Peter J. Denning",
  title =        "Review of {`Statistical Computer Performance
                 Evaluation' by Walter Freiberger; Academic Press
                 (1972)}",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "1",
  pages =        "16--22",
  month =        mar,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041606.1041611",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This book is the proceedings of a conference held at
                 Brown University on November 22-23, 1971. The editors
                 state that only papers dealing with real data in a
                 reasonably sophisticated manner were accepted for the
                 conference. Papers dealing simply with the collection
                 of data, or with queueing-theoretic models, were
                 excluded. The papers are grouped into seven sections
                 corresponding to the seven sessions at the conference;
                 at the end of each section is a brief statement by the
                 one or two discussants of that session's papers.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Svobodova:1973:CSN,
  author =       "Liba Svobodova",
  title =        "Communications: Some notes on the {Computer Synectics}
                 hardware monitor {SUM}",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "1",
  pages =        "23--25",
  month =        mar,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041606.1041609",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:49:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The longer I have been working with the hardware
                 monitor SUM, a device designed and manufactured by the
                 Computer Synectics, the less I have been pleased.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ishida:1973:JSU,
  author =       "Haruhisa Ishida and Nobumasa Takahashi",
  title =        "Job statistics at a 2000-user university computer
                 center",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "2",
  pages =        "2--13",
  month =        jun,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1113644.1113645",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:06 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The Computer Centre at the University of Tokyo is one
                 of 7 large university centers serving researchers
                 throughout Japan; it processes 120,000 jobs annually
                 submitted by 2,000 academic users in various research
                 institutions. A brief comparison of the 7 centers and
                 the breakdown of users are shown. To clarify the job
                 characteristics of these users, account data of all
                 jobs in an entire year were analyzed and the results
                 are presented. They are shown in terms of the
                 distribution of CPU time, numbers of input cards/output
                 pages/output cards, program size, job end conditions
                 and turnaround time etc. A special on-line card punch
                 is mentioned which punches holes in the 13th row to
                 separate output card decks. It was found that, when the
                 CPU speed was increased 8 times after replacement under
                 the same operating system, the average job size was
                 increased 4 times. Hence only twice as many jobs could
                 be processed. The results of analysis have been used
                 for systems performance evaluation (for example, the
                 CPU busy-rate was found to be 69\%), improvement and
                 for an input job model used in planning for the next
                 system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Rice:1973:AMC,
  author =       "Don R. Rice",
  title =        "An analytical model for computer system performance
                 evaluation",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "2",
  pages =        "14--30",
  month =        jun,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1113644.1113646",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:06 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes an analytical model of a computer
                 system useful in the evaluation of system performance.
                 The model is described in detail while the mathematics
                 are minimized. Emphasis is placed on the utility of the
                 model rather than the underlying theory and a number of
                 illustrative examples are included.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kolence:1973:SE,
  author =       "Kenneth W. Kolence",
  title =        "The software empiricist",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "2",
  pages =        "31--36",
  month =        jun,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1113644.1113647",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:06 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The advent of software and hardware monitoring
                 technology has presented us with a flood of data,
                 without bringing commensurate understanding by which to
                 interpret it. Thus, the most important problem before
                 us in the field of computer measurement is to discover
                 the relationships between the variables we measure and
                 the overall system properties of interest.
                 Particularly, we wish to be able to predict system
                 behavior and performance from a knowledge of the values
                 of factors under our control. In this way, not only
                 will we understand the meanings of these variables, but
                 we shall learn how to design our systems to perform as
                 we wish them to. The latter is a prime goal of software
                 engineering, the former the rationale of what has been
                 called software physics. In this section of the Review
                 we are and shall be interested in the empirical
                 development of such an understanding, and the
                 experimental aspects of computer measurement. Our
                 intent is to assist in the building of a solid body of
                 knowledge by providing a publication vehicle for
                 empirical and experimental data. That is, we have
                 little interest in publishing theory, which can
                 normally be done elsewhere. Our goal is to publish
                 experimental data to support or refute theory, and
                 empirical data from which theory builders may take
                 their inspiration.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kolence:1973:SUP,
  author =       "Kenneth W. Kolence and Philip J. Kiviat",
  title =        "Software unit profiles \& {Kiviat} figures",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "3",
  pages =        "2--12",
  month =        sep,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041613.1041614",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:12 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In the June, 1973 issue of the {\em Performance
                 Evaluation Review}, the concept of using circular
                 graphs (called Kiviat graphs by Kolence) to present
                 system performance data was introduced in the column
                 {\em The Software Empiricist}. In this article we wish
                 to report on some recent work in using such graphs to
                 present system and program profiles in a strikingly
                 visual way of potential use to all practitioners of
                 computer measurement. In discussing this data, we find
                 it necessary to comment on the meaning of the variables
                 used for such profiles in a way which also should be of
                 interest to practitioners.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Denning:1973:WOA,
  author =       "Peter J. Denning",
  title =        "Why our approach to performance evaluation is
                 {SDRAWKCAB}",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "3",
  pages =        "13--16",
  month =        sep,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041613.1041615",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:12 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "What does SDRAWKCAB mean? Some of you already know;
                 some I have told; some have guessed. But many do not
                 know. Those who do know, know it would be contrary to
                 the theme of SDRAWKCAB to tell you immediately what it
                 means, although it certainly would make things much
                 easier if I told you now.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Beck:1973:CSL,
  author =       "Norman Beck and Gordon Ashby",
  title =        "On cost of static linking and loading of subprograms",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "3",
  pages =        "17--20",
  month =        sep,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041613.1041616",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:12 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The purpose of this paper is to report some data
                 concerning cost in CPU processing due to loading
                 programs. The data was collected on a PDP-10, using
                 modifications made by the linking loader to the
                 prologue generated for FORTRAN compiled programs, by
                 the addition of one UUO (a programmed operation similar
                 to an SVC on IBM 360/370), and several cells in the
                 monitor used as counters. The data covers the number of
                 programs loaded and the CPU ms expended loading them.
                 This data is broken down between programs that were
                 loaded and never entered and programs loaded and
                 eventually executed. It is further classified according
                 to periods of heavy use for program development and
                 periods of heavy production use.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kolence:1973:SEE,
  author =       "Ken Kolence",
  title =        "The software empiricist: experimental disciplines \&
                 computer measurements",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "3",
  pages =        "21--23",
  month =        sep,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041613.1041617",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:12 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The introduction and use of the capability for
                 quantitative measurements into the field of computer
                 science must inexorably lead to the development and use
                 of experimental approaches and techniques to discover,
                 understand, and verify relationships between the
                 observables of what is today loosely called computer
                 performance. The reason for this column appearing as a
                 regular feature in PER is to assist in the process of
                 bridging the gap in both directions between the
                 practitioners and theorists of the field. In the first
                 column in this series, we introduced the concepts of
                 empiricism and the initial discoveries of invariances
                 of values as foundations of this new aspect of computer
                 science. With this issue, we shall begin to investigate
                 the requirements and methodologies by which this
                 approach can be applied to the common benefit of both
                 the practical and theoretical orientations. When a
                 particular topic can be demonstrated with actual data
                 or equivalent means, it will be the topic of a separate
                 article.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hughes:1973:UHM,
  author =       "James Hughes and David Cronshaw",
  title =        "On using a hardware monitor as an intelligent
                 peripheral",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "4",
  pages =        "3--19",
  month =        dec,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1113650.1113651",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:20 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Conventionally, hardware monitoring has been performed
                 using manually controlled off-line devices. It is
                 suggested that a hardware monitor incorporating program
                 control and acting as an intelligent peripheral device
                 would realize greater utility and wider application.
                 The development and application of such a device is
                 described; a combination of the merits of both software
                 and hardware monitoring techniques is claimed for it.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Svobodova:1973:MCS,
  author =       "Liba Svobodova",
  title =        "Measuring computer system utilization with a hardware
                 and a hybrid monitor",
  journal =      j-SIGMETRICS,
  volume =       "2",
  number =       "4",
  pages =        "20--34",
  month =        dec,
  year =         "1973",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1113650.1113652",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:20 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computer system utilization is generally measured in
                 terms of the utilization of individual system
                 components and the overlap of activities of two or more
                 system components. This type of data can be used to
                 construct a system performance profile [BONN 69, COCI
                 71, SUM 70]. Utilization of a system component is
                 obtained as the ratio (unit busy time)/(total elapsed
                 time). If a particular unit performs more than one type
                 of operation, the unit busy time may be further divided
                 into portions corresponding to different activities and
                 an activity profile can be constructed for each such
                 unit. For a storage unit, information about utilization
                 of different portions of storage might be desirable in
                 addition to utilization of this unit as a whole. A
                 space utilization profile can be developed in this
                 case. To cover both cases, the term unit utilization
                 profile is used.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Wortman:1974:NHR,
  author =       "David B. Wortman",
  title =        "A note on high resolution timing",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "1",
  pages =        "3--9",
  month =        mar,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041619.1041620",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:24 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The ability to accurately time the execution of
                 sequences of machine instructions is an important tool
                 in the tuning and evaluation of computer hardware and
                 software. The complexity of modern hardware and
                 software systems often makes accurate timing
                 information difficult to obtain [1]. This note
                 describes an experimental comparison of timing
                 information provided by a large multiprogramming
                 operating system (OS/360 MVT) with timing information
                 derived directly from a high resolution hardware clock.
                 The hardware clock was found to be a superior source of
                 timing information.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Snyder:1974:QSA,
  author =       "Rowan Snyder",
  title =        "A quantitative study of the addition of extended core
                 storage",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "1",
  pages =        "10--33",
  month =        mar,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041619.1041621",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:24 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In evaluating computer systems it is necessary to
                 identify the prime determinants of system performance,
                 and to quantify a performance metric. The purpose of
                 this paper is to present a quantitative study of the
                 effects of a significant hardware reconfiguration on
                 some measures of system performance, and thereby
                 demonstrate the effectiveness of Kiviat graphs in
                 performance analysis.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Merrill:1974:TCA,
  author =       "H. E. Barry Merrill",
  title =        "A technique for comparative analysis of {Kiviat}
                 graphs",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "1",
  pages =        "34--39",
  month =        mar,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041619.1041622",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:24 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The article in the September 1973 Performance Evaluation
                 Review demonstrated again the utility of the Kiviat
                 Graph as a visual display of system profiles. A simple
                 extension of the concept of the Kiviat Graph permits a
                 realistic (though not necessarily linear) comparison of
                 two Kiviat graphs.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Peterson:1974:CSH,
  author =       "Thomas G. Peterson",
  title =        "A comparison of software and hardware monitors",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "2",
  pages =        "2--5",
  month =        jun,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041687.1041688",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Tests were performed to compare the accuracy of two
                 computer system monitors. Specifically, results from a
                 hardware monitor were compared with results from a
                 software monitor. Some of the subreports produced by
                 the software monitor were quite accurate; other
                 subreports were not quite so accurate, but they were
                 consistent from run to run. In view of these test
                 results, it appears that the software monitor can be
                 used to measure the effects of changes made in a system
                 tuning project.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Syms:1974:BCT,
  author =       "Gordon H. Syms",
  title =        "Benchmarked comparison of terminal support systems for
                 {IBM 360} computers",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "2",
  pages =        "6--34",
  month =        jun,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041687.1041689",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A set of terminal scripts and benchmarks were derived
                 for comparing the performance of time sharing and batch
                 computer operating systems. Some of the problems
                 encountered in designing valid benchmarks for comparing
                 computer operating systems under both terminal and
                 batch loads are discussed. The results of comparing
                 TSS/360, CP/67 and MTS time sharing systems for the IBM
                 360/67 over a wide range of load conditions are
                 presented. The results of comparing TSS, MTS and OS/MVT
                 under batch loads are also presented. The tests were
                 conducted with Simplex and Dual processor
                 configurations with 256K bytes to 768K bytes of main
                 memory. The conclusions were quite surprising in that
                 CP/67 running on a minimal system performed
                 competitively with TSS/360 on a much larger dual
                 processor system. With equal configurations CP/67
                 outperformed TSS/360 by a wide margin. Furthermore, MTS
                 providing both batch and terminal support produced
                 performance that was 5 percent to 25 percent better
                 than the split configuration with CP/67 providing the
                 terminal support and OS/MVT providing the batch
                 processing support. Serious performance degradation of
                 the time sharing computer systems from overloading was
                 experienced and a simple solution is suggested to
                 prevent such degradation. The degradation was so severe
                 as to render the performance less than that of a
                 sequential job processor system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Morris:1974:KGC,
  author =       "Michael F. Morris",
  title =        "{Kiviat} graphs: conventions and `figures of merit'",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "3",
  pages =        "2--8",
  month =        oct,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041691.1041692",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:36 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Once in a very great while an idea comes along that
                 quickly captures many imaginations. The circular
                 graphic technique proposed nearly two years ago by Phil
                 Kiviat, our illustrious Chairman, and very
                 appropriately named `Kiviat Graphs' by our erstwhile
                 (and sorely missed) `Software Empiricist,' Ken Kolence,
                 is one of these ideas.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lujanac:1974:NSB,
  author =       "Paul L. Lujanac",
  title =        "A note on {Syms}' benchmarked comparison",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "3",
  pages =        "9--10",
  month =        oct,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041691.1041693",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:36 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "If the load factor is expressed linearly as a fraction
                 of the capacity of a terminal-oriented system, we
                 assume that response times increase more or less
                 exponentially with an increase in load factor. Syms'
                 load factor is nonlinear, and, in fact, was designed to
                 `make the terminal response times approximately a
                 linear function of the load factors.'",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Graham:1974:MPB,
  author =       "G. Scott Graham and Peter J. Denning",
  title =        "Multiprogramming and program behavior",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "1--8",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809367",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Dynamic multiprogramming memory management strategies
                 are classified and compared using extant test data.
                 Conclusions about program behavior are then drawn.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Brandwain:1974:MPV,
  author =       "A. Brandwain and J. Buzen and E. Gelenbe and D.
                 Potier",
  title =        "A model of performance for virtual memory systems",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "9--9",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809368",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Queueing network models are well suited for analyzing
                 certain resource allocation problems associated with
                 operating system design. An example of such a problem
                 is the selection of the level of multiprogramming in
                 virtual memory systems. If the number of programs
                 actively competing for main memory is allowed to reach
                 too high a value, thrashing will occur and performance
                 will be seriously degraded. On the other hand,
                 performance may also suffer if the level of
                 multiprogramming drops too low since system resources
                 can become seriously underutilized in this case. Thus
                 it is important for virtual memory systems to maintain
                 optimal or near optimal levels of multiprogramming at
                 all times. This paper presents an analytic model of
                 computer system behavior which can be used to study
                 multiprogramming optimization in virtual memory
                 systems. The model, which explicitly represents the
                 numerous interactions which occur as the level of
                 multiprogramming varies, is used to numerically
                 generate performance curves for representative sets of
                 parameters. A simplified model consisting of a CPU and
                 a single backing store device is then used to derive an
                 approximate expression for the optimal level of
                 multiprogramming. The simplified model is also used to
                 examine the transient behavior of such systems. The
                 mathematical model we present is based on some
                 simplifying assumptions; in particular all programs
                 executing in the system are supposed to be
                 statistically identical. In this respect the model we
                 present must be considered to be a theoretical
                 explanation of a phenomenon (thrashing) observed in
                 certain operating systems rather than an exact
                 representation of reality. Certain assumptions of the
                 mathematical model are relaxed in a simulation model
                 where distribution functions of service times at the
                 secondary memory and input-output devices are
                 arbitrary; by comparison with the theoretical results
                 we see that CPU utilization and throughput are not very
                 sensitive to the specific forms of these distributions
                 and that the usual exponential assumptions yield quite
                 satisfactory results. The simulation model is also
                 programmed to contain overhead. Again we observe that
                 the mathematical model's predictions are in fair
                 agreement with the useful CPU utilization predicted by
                 the simulation experiments.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  xxnote =       "Check: author may be Brandwajn??",
}

@Article{Henderson:1974:OCW,
  author =       "Greg Henderson and Juan Rodriguez-Rosell",
  title =        "The optimal choice of window sizes for working set
                 dispatching",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "10--33",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809369",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The concept of varying window size in a working set
                 dispatcher to control working set size and number of
                 page faults is examined. A space-time cost equation is
                 developed and used to compare fixed window size to
                 variable window size for different types of secondary
                 storage based on the simulated execution of real
                 programs. A general approach is indicated for studying
                 the relative merit of the two dispatching algorithms
                 and their interaction with different hardware
                 configurations.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Dispatching; Optimal control; Resource allocation;
                 Supervisory systems; Time-sharing systems; Working
                 set",
}

@Article{Denning:1974:CLP,
  author =       "Peter J. Denning",
  title =        "Comments on a linear paging model",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "34--48",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809370",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The linear approximation relating mean time between
                 page transfers between levels of memory, as reported by
                 Saltzer for Multics, is examined. It is tentatively
                 concluded that this approximation is untenable for main
                 memory, especially under working set policies; and that
                 the linearity of the data for the drum reflects the
                 behavior of the Multics scheduler for background jobs,
                 not the behavior of programs.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Brice:1974:FCR,
  author =       "Richard S. Brice and J. C. Browne",
  title =        "Feedback coupled resource allocation policies in the
                 multiprogramming-multiprocessor computer system",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "49--53",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809371",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents model studies of some integrated
                 feedback-driven scheduling systems for a
                 multiprogrammed computer system. This abstract can
                 present only the conclusions of the studies and little
                 of the supporting data and detail. The basic format of
                 the analysis is to fix a size for the local buffers and
                 a total size for the collection buffers, to define a
                 set of algorithms for the determination of the data
                 removal quanta to the local buffers, the allocation of
                 space in the collection buffers, and the look-ahead
                 mechanism for input and then to evaluate the relative
                 merits of the various strategies by the resulting CPU
                 efficiency. Three feedback algorithms are studied as
                 examples in this work.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Halachmi:1974:CCT,
  author =       "Baruch Halachmi and W. R. Franta",
  title =        "A closed, cyclic, two-stage multiprogrammed system
                 model and its diffusion approximation solution",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "54--64",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809372",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper attention is focused on closed
                 multiprogrammed computer type systems. In particular,
                 two-stage closed queueing systems are considered. The
                 first stage can be associated with the CPU (Central
                 Processing Unit) and the other with the I/O
                 (Input-Output) operations. For all the models
                 discussed, diffusion approximation solutions are
                 developed. For the first model we consider the
                 {GI$_1$/M$_S$/N} system, which allows the service times of a
                 single CPU to obey any general probability
                 distribution, with finite variance, while the I/O
                 servers are taken to be exponential. The second model
                 is an extension of the first where the concept of
                 feedback is implemented in the CPU stage. This concept
                 plays an important role in computer environments where
                 the operating system includes the multiplexing or page
                 on demand property. The third model, the {M$_{S_1}$/M$_{S_2}$/N},
                 deals with multiprocessing computer systems where
                 possibly more than one CPU is available, but all
                 servers are assumed to be exponential. In the spirit of
                 the approximation to the GI/G/S open system, as a final
                 model, we construct the approximate solution to the
                 {GI$_{S_1}$/GI$_{S_2}$/N} closed system and discuss the
                 circumstances under which its use is advisable. Several
                 numerical examples for each of the models are given,
                 each accompanied by appropriate simulation results for
                 comparison. It is on the basis of these comparisons
                 that the quality of the suggested diffusion
                 approximations can be judged. The diffusion
                 approximating formulas should be regarded not only as a
                 numerical technique, but also as a simplifying
                 approach, by which deeper insight can be gained into
                 complicated queueing systems. Considerable work remains
                 to be done using the approach given here as a
                 methodology, and several possible extensions are presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
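
%%% Editorial note: the all-exponential, single-server special case of
%%% the closed two-stage models above has a simple exact birth-death
%%% solution, the natural baseline against which a diffusion
%%% approximation is judged. The Python sketch below computes it; the
%%% parameter values and names are ours.

# Exact steady state of a closed, cyclic, two-stage network with one
# exponential server per stage and n_jobs circulating customers.
def closed_two_stage(mu_cpu, mu_io, n_jobs):
    # Detailed balance on n = jobs at the CPU stage:
    # p(n) * mu_cpu = p(n-1) * mu_io, so p(n) ~ (mu_io/mu_cpu)**n.
    rho = mu_io / mu_cpu
    weights = [rho ** n for n in range(n_jobs + 1)]
    total = sum(weights)
    p = [w / total for w in weights]
    throughput = mu_cpu * (1.0 - p[0])    # busy probability times rate
    mean_at_cpu = sum(n * p[n] for n in range(n_jobs + 1))
    return throughput, mean_at_cpu

x, q = closed_two_stage(mu_cpu=2.0, mu_io=1.0, n_jobs=5)
print(f"throughput = {x:.3f}, mean CPU queue = {q:.3f}")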

@Article{Schwetman:1974:ATS,
  author =       "H. D. Schwetman",
  title =        "Analysis of a time-sharing subsystem (a preliminary
                 report)",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "65--75",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809373",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The MESA subsystem provides a wide variety of services
                 to remotely located users of the computing facilities
                 of the Purdue University Computing Center. This paper
                 presents the preliminary steps of an in-depth study
                 into the behavior of MESA. The study uses a software
                 data-gathering facility to analyze the usage and
                 queueing aspects of this behavior and to provide values
                 for parameters used by two models of the subsystem.
                 These models, a network-of-queues model and a
                 simulation model, are designed to project subsystem
                 behavior in different operating environments. The paper
                 includes a number of tables and figures which highlight
                 the results, so far, of the study.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Reiser:1974:ASC,
  author =       "M. Reiser and A. G. Konheim",
  title =        "The analysis of storage constraints by a queueing
                 network model with blocking",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "76--81",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809374",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The finite capacity of storage has a significant
                 effect on the performance of a contemporary computer
                 system. Yet it is difficult to formulate this problem
                 and analyze it by existing queueing network models. We
                 present an analysis of an open queueing model with two
                 servers in series in which the second server has finite
                 storage capacity. This network is an exponential
                 service system; the arrival of requests into the system
                 is modeled by a Poisson process (of rate $ \lambda $)
                 and service times in each stage are exponentially
                 distributed (with rates $ \alpha $ and $ \beta $
                 respectively). Requests are served in each stage
                 according to the order of their arrival. The principal
                 characteristic of the service in this network is
                 blocking; when $M$ requests are queued or in service in
                 the second stage, the server in the first stage is
                 blocked and ceases to offer service. Service resumes in
                 the first stage when the queue length in the second
                 stage falls to $ M - 1$. Neuts [1] has studied
                 two-stage blocking networks (without feedback) under
                 more general statistical hypotheses than ours. Our goal
                 is to provide an algorithmic solution which may be more
                 accessible to engineers.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
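
%%% Editorial note: the blocking model above is completely specified
%%% (Poisson arrivals at rate lambda, exponential rates alpha and
%%% beta, stage 1 blocked while stage 2 holds M requests), so a short
%%% Monte Carlo run makes a handy cross-check on any algorithmic
%%% solution. The Python simulation below is our illustration, not
%%% the authors' algorithm.

import random

# Markov-chain simulation of the two-server tandem queue with blocking.
def simulate(lam, alpha, beta, m_cap, horizon, seed=1):
    rng = random.Random(seed)
    t, n1, n2, done = 0.0, 0, 0, 0
    area = 0.0                                   # time integral of n1 + n2
    while t < horizon:
        rate1 = alpha if (n1 > 0 and n2 < m_cap) else 0.0  # blocked at m_cap
        rate2 = beta if n2 > 0 else 0.0
        total = lam + rate1 + rate2
        dt = rng.expovariate(total)
        area += (n1 + n2) * dt
        t += dt
        u = rng.random() * total                 # pick the next event
        if u < lam:
            n1 += 1                              # arrival to stage 1
        elif u < lam + rate1:
            n1 -= 1; n2 += 1                     # stage-1 completion
        else:
            n2 -= 1; done += 1                   # departure from stage 2
    return done / t, area / t                    # throughput, mean in system

print(simulate(lam=0.5, alpha=1.0, beta=1.0, m_cap=3, horizon=2e5))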

@Article{Schatzoff:1974:SVT,
  author =       "M. Schatzoff and C. C. Tillman",
  title =        "Statistical validation of a trace-driven simulator",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "82--93",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809375",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A common problem encountered in computer system
                 simulation is that of validating that the simulator can
                 produce, with a reasonable degree of accuracy, the same
                 information that can be obtained from the modelled
                 system. This is basically a statistical problem because
                 there are usually limitations with respect to the
                 number of controlled tests which can be carried out,
                 and assessment of the fidelity of the model is a
                 function of the signal to noise ratio. That is, the
                 magnitude of error which can be tolerated depends upon
                 the size of the effect to be predicted. In this paper,
                 we describe by example how techniques of statistical
                 design and analysis of experiments have been used to
                 validate the modeling of the dispatching algorithm of a
                 time sharing system. The examples are based on a
                 detailed, trace-driven simulator of CP-67. They show
                 that identical factorial experiments involving
                 parameters of this algorithm, when carried out on both
                 the simulator and on the actual system, produced
                 statistically comparable effects.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
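
%%% Editorial note: the validation method above hinges on running the
%%% same factorial experiment on the real system and on the simulator
%%% and comparing the estimated effects. A minimal Python sketch for a
%%% 2 x 2 factorial follows; the response values are invented purely
%%% to show the computation, not taken from the paper.

# Main effects of two dispatcher parameters (levels -1/+1) from a 2x2
# factorial; y maps (a, b) to the measured response (e.g. seconds).
def main_effects(y):
    a = sum(k[0] * v for k, v in y.items()) / 2.0
    b = sum(k[1] * v for k, v in y.items()) / 2.0
    return a, b

system    = {(-1, -1): 41.0, (+1, -1): 36.0, (-1, +1): 44.0, (+1, +1): 38.0}
simulator = {(-1, -1): 40.0, (+1, -1): 35.5, (-1, +1): 43.0, (+1, +1): 37.5}
print("system effects:   ", main_effects(system))
print("simulator effects:", main_effects(simulator))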

@Article{Ferrari:1974:GPS,
  author =       "Domenico Ferrari and Mark Liu",
  title =        "A general-purpose software measurement tool",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "94--105",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809376",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A software measurement tool designed for the users of
                 PRIME, an interactive system being developed, is
                 presented. The tool, called SMT, allows its user to
                 instrument a program, modify a pre-existing
                 instrumentation and specify how the collected data are
                 to be reduced by typing in a few simple commands. The
                 user can also write his own measurement routines, which
                 specify the actions to be taken at event detection
                 time, and submit them to the SMT; after checking their
                 correctness, the SMT deals with them as with its
                 built-in, standard measurement routines. The design
                 goals of a general-purpose tool like the SMT are
                 discussed, and the prototype version of the tool, which
                 has been implemented, is described from the two
                 distinct viewpoints of a user and of a measurement-tool
                 designer. An example of the application of the
                 prototype to a measurement problem is illustrated, the
                 reasons why not all of the design goals have been
                 achieved in the implementation of the prototype are
                 reviewed, and some of the foreseeable extensions of the
                 SMT are described.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Foley:1974:EDD,
  author =       "James D. Foley and John W. McInroy",
  title =        "An event-driven data collection and analysis facility
                 for a two-computer network",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "106--120",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809377",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper we describe an event-driven data
                 collection facility, and a general-purpose program to
                 perform a set of analyses on the collected data. There
                 are several features which distinguish this facility
                 from others. First, the system being monitored is a
                 network of loosely-coupled computers. Although there
                 are just two computers in the network, the facility
                 could be readily extended to larger networks. Second,
                 the main purpose of the facility is to monitor the
                 execution of interactive graphics application programs
                 whose processing and data are distributed between the
                 network's computers. Third, the data collector and
                 analyzer are readily extendible to treat new kinds of
                 data. This is accomplished by a data- and
                 event-independent collector and a table-driven data
                 analyzer.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Batson:1974:MVM,
  author =       "A. P. Batson and R. E. Brundage",
  title =        "Measurements of the virtual memory demands of
                 {Algol-60} programs (Extended Abstract)",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "121--126",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809378",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Programming languages such as Algol-60 use block
                 structure to express the way in which the name space of
                 the current environment, in the contour model (1) sense
                 of that word, changes during program execution. This
                 dynamically-varying name space corresponds to the
                 virtual memory required by the process during its
                 execution on a computer system. The research to be
                 presented is an empirical study of the nature of the
                 memory demands made by a collection of Algol-60
                 programs during execution. The essential
                 characteristics of any such resource request are the
                 amount of memory requested, and the holding time for
                 which the resource is retained and these distributions
                 will be presented for several components of the virtual
                 memory required by the Algol programs.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sebastian:1974:HHE,
  author =       "Peter R. Sebastian",
  title =        "{HEMI} ({Hybrid Events Monitoring Instrument})",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "127--139",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809379",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "HEMI is an experimental instrumentation system being
                 developed for use with the CYBER 70 and 170 Series
                 computers in order to ascertain the extent to which an
                 integrated approach to instrumentation is economically
                 and technologically viable for performance measurement
                 and evaluation purposes. HEMI takes advantage of the
                 distributed CYBER computer architecture. This consists
                 of a pool of Peripheral Processors (PPs), mainly
                 dedicated to I/O and system tasks, while the CPU
                 capabilities are reserved mostly for computation;
                 Central Memory constitutes the communications link.
                 HEMI uses one of the PPs as its major processor. A
                 hardware data acquisition front end is interfaced to
                 one of the I/O channels and driven by the PP. Hardware
                 probes sample events at suitable testpoints, while the
                 PP has software access to Central Memory (Operating
                 System tables and parameters), Status Registers, I/O
                 Channel Flags, etc. A data reduction package is used to
                 produce a variety of reports from the data collected. A
                 limited on-line data reduction and display capability
                 is also provided. This paper will describe the current
                 status of the project as well as anticipated
                 applications of HEMI.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Cox:1974:IAC,
  author =       "Springer W. Cox",
  title =        "Interpretive analysis of computer system performance",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "140--155",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809380",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A typical performance evaluation consists of the
                 identification of resources, the definition of system
                 boundaries, the measurement of external and internal
                 performance variables, and finally the interpretation
                 of data and projection of system performance to
                 hypothetical environments. These projections may be
                 used to estimate the cost savings to be expected when
                 changes are made to the system. The fundamental
                 external performance measures such as response time and
                 throughput are intimately related, but may be defined
                 differently depending on how the system is defined.
                 They can be analyzed with respect to the internal
                 performance measures (such as activities, queue lengths
                 and busy times) by applying one or more interpretations
                 such as: absolute utilizations, normalized busy times,
                 system profiles, analysis of response, workload
                 relaxation, and resource consumption hyperplanes. These
                 models, which are generally free of assumptions
                 regarding interarrival and service time distributions,
                 can be adjusted to represent potential changes to the
                 system. Then the interpretations may be used to
                 evaluate the predicted external performance measures.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Noe:1974:DYC,
  author =       "J. D. Noe and N. W. Runstein",
  title =        "Develop your computer performance pattern",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "156--165",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809381",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Is the load on your computer shifting? Did that change
                 to faster access disks really help? Would more core
                 memory increase throughput appreciably, or would it be
                 necessary to also increase central processor power?
                 These are three quite different kinds of questions; one
                 concerns detecting a long-term trend, another assessing
                 the effects of a system change, and a third estimating
                 effects of the decision to alter the configuration. Yet
                 all of these require knowledge of current and past
                 system performance, the type of knowledge that must be
                 the result of long-term performance monitoring. This is
                 not simple enough to be picked up overnight or in one
                 series of experiments, nor can it be assessed by
                 watching one or two parameters over a long period. One
                 must have a thorough understanding of the pattern of
                 performance by knowing the mean values of a number of
                 measures and knowing something about the variations
                 from these means. This paper hardly needs to recommend
                 that computer managers establish an understanding of
                 performance pattern; they already are very conscious of
                 the need. What it does is recount the development of a
                 method of doing so for the CDC 6400 at the University
                 of Washington and the selection of ``Kiviat Graphs''
                 as a means to present data in a synoptic form. The
                 remainder of this paper will give a brief account of
                 the authors' experience in designing a measurement
                 system for the CDC 6400 at the University of Washington
                 Computer Center. This will include comments on the
                 approach to deciding what to measure and display for
                 the synoptic view of the system, as well as how to
                 provide more detailed data for backup. Examples of the
                 use of Kiviat Graphs [4] to show the effects of load
                 shift and of a system configuration change are
                 included, and the effect of a change of operating
                 system will be noted.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Brotherton:1974:CCC,
  author =       "D. E. Brotherton",
  title =        "The computer capacity curve --- a prerequisite for
                 computer performance evaluation and improvement",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "166--179",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809382",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Measurements themselves have tended to concentrate
                 on specific computer configuration components (e.g.,
                 CPU load, channel load, disk data set contention,
                 problem program optimization, operating system
                 optimization, etc.) rather than at the total computer
                 configuration level. As a consequence, since these
                 components can have a high degree of interaction, the
                 requirement currently exists for a workable
                 configuration performance concept which will reflect
                 the configuration performance change that is the
                 resultant of single or multiple component change. It is
                 the author's opinion that such a concept will provide
                 management and measurement specialists a planning and
                 analysis tool that can be well used in evaluating the
                 costs. It is to this configuration performance concept
                 that this paper is addressed, and the concept, by the
                 author's choosing, is named ``The Computer Capacity Curve.''",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Erikson:1974:VCU,
  author =       "Warren J. Erikson",
  title =        "The value of {CPU} utilization as a criterion for
                 computer system usage",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "180--187",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809383",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "It is generally agreed that a computer system's CPU
                 utilization means little by itself, but there has been
                 only a limited amount of research to determine the
                 value of CPU utilization when used with other
                 performance measures. This paper focuses on
                 time-sharing systems (or similar systems such as some
                 remote batch systems) as viewed by someone who wants to
                 minimize the mean cost per job run on the system. The
                 paper considers cost per job to include both the
                 computer cost (as allocated among all the jobs run on
                 the system) and the user cost (where user cost is the
                 time spent waiting for a response from the system
                 multiplied by the user's wage rate). Given this
                 approach, cost per job is a function of some constants
                 (user wage rate, computer system cost, and mean
                 processing time per job) and only one variable (CPU
                 utilization). The model thus developed can be used to
                 determine the optimum CPU utilization for any system.
                 It can also be used to determine the value of different
                 tuning efforts.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
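
%%% Editorial note: the abstract reduces cost per job to a function of
%%% CPU utilization alone. Under an illustrative M/M/1-style delay
%%% assumption (ours, not the paper's), the model and its optimum take
%%% a closed form, sketched below in Python.

import math

# Cost per job = allocated machine cost + user waiting cost, as a
# function of utilization. c_sys and wage in dollars/hour, s = mean
# CPU time per job in hours. The M/M/1 response form is an assumption.
def cost_per_job(util, c_sys, wage, s):
    return c_sys * s / util + wage * s / (1.0 - util)

def optimal_utilization(c_sys, wage):
    # d(cost)/d(util) = 0 gives util* = 1 / (1 + sqrt(wage / c_sys)).
    return 1.0 / (1.0 + math.sqrt(wage / c_sys))

u = optimal_utilization(c_sys=300.0, wage=25.0)
print(f"optimal utilization ~ {u:.2f}, "
      f"cost/job = {cost_per_job(u, 300.0, 25.0, 0.01):.4f}")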

@Article{Badel:1974:AOP,
  author =       "M. Badel and E. Gelenbe and J. Leroudier and D. Potier
                 and J. Lenfant",
  title =        "Adaptive optimization of the performance of a virtual
                 memory computer",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "188--188",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007773.809384",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "It is known that the regulation of the degree of
                 multiprogramming is perhaps one of the most important
                 factors determining the overall performance of a
                 virtual memory computer. In this paper we present an
                 approach which differs somewhat from the approaches
                 usually taken to regulate the degree of
                 multiprogramming, which are mainly derived from the
                 working-set principles. We design a controller which
                 will regulate the system in order to optimize a given
                 performance measure. The controller is applied to a
                 system where the critical resource is primary memory,
                 and we are only concerned with systems where
                 ineffective regulation leads to the phenomenon known as
                 thrashing due to extensive paging activity. In the
                 first section, the dynamics of the system we wish to
                 regulate are investigated using an analytical model.
                 The system consists of a set of terminals and of a
                 resource loop (CPU, secondary memory device, file disk)
                 shared by the users. Using classical assumptions about
                 program behavior (e.g., life-time function), the
                 throughput of the RL is obtained as a function of the
                 degree of multiprogramming $n$ (number of users sharing
                 the resources at a given instant of time) and of the
                 system parameters. This result provides greater
                 insight into the ``plant'' we wish to control. The
                 mathematical results are validated and extended with
                 data from simulation experiments using a more detailed
                 model (overheads and non-exponential assumption). In
                 the next section, a criterion called ``dilatation''
                 based on the utilization of the different resources is
                 defined. From the analytical and simulation results of
                 the first section, it can be shown that there exists a
                 value $n_0$ of the degree of multiprogramming which
                 maximizes this criterion. The regulation of $n$ to
                 $n_0$ is achieved by controlling the access of the
                 users to the RL. The value of $n_0$ is estimated in
                 real time through a continuous estimation of the first
                 two moments of the criterion. Using these estimations,
                 the decision whether to introduce a new user into the
                 RL is taken whenever a user leaves a terminal or departs from
                 the RL. Extensive simulation experiments were
                 conducted, where the implementation of the different
                 functions of the controller have been thoroughly
                 simulated. They have shown that the control scheme
                 leads to an improvement of the system performance in
                 mean response time and resource utilization, and,
                 overall, adapts in real time the degree of
                 multiprogramming to the characteristics of the users
                 (the adaptation is performed in 4 sec. or so for a unit
                 variation of the optimal degree of multiprogramming). A
                 discussion of practical application of results ends the
                 paper.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
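
%%% Editorial note: the controller above hunts in real time for the
%%% degree of multiprogramming $n_0$ that maximizes a
%%% utilization-based criterion. The Python sketch below conveys the
%%% flavor with a synthetic throughput-versus-n curve that collapses
%%% under thrashing and a noisy one-step hill climber; both are our
%%% stand-ins for the paper's ``dilatation'' criterion.

import random

def throughput(n, sweet_spot=6.0):
    base = n / (1.0 + n)                        # more overlap helps...
    paging_penalty = (n / sweet_spot) ** 4      # ...until paging dominates
    return base / (1.0 + paging_penalty)

def noisy_measure(n, rng):
    return throughput(n) * rng.gauss(1.0, 0.05)  # measurement noise

rng = random.Random(7)
n = 2
for _ in range(30):
    # Admit one more user only if the measured criterion improves.
    up, here = noisy_measure(n + 1, rng), noisy_measure(n, rng)
    n = n + 1 if up > here else max(1, n - 1)
print("estimated optimal degree of multiprogramming:", n)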

@Article{Kimbleton:1974:BCS,
  author =       "Stephen R. Kimbleton",
  title =        "Batch computer scheduling: a heuristically motivated
                 approach",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "189--198",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809385",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Efficient scheduling of jobs for computer systems is a
                 problem of continuing concern. The applicability of
                 scheduling methodology described in the operations
                 research literature is severely restricted by the
                 dimensionality of job characteristics, the number of
                 distinct resource types comprising a computer system,
                 the non-deterministic nature of the system due to both
                 interprocess interaction and contention, and the
                 existence of a multitude of constraints affecting job
                 initiation times, job completion times, and job
                 interactions. In view of the large number of issues
                 which must be considered in job scheduling, a heuristic
                 approach seems appropriate. This paper describes an
                 initial implementation of such an approach based upon a
                 fast, analytically driven, performance prediction
                 tool.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sharp:1974:APD,
  author =       "Joseph C. Sharp and James N. Roberts",
  title =        "An adaptive policy driven scheduler",
  journal =      j-SIGMETRICS,
  volume =       "3",
  number =       "4",
  pages =        "199--208",
  month =        dec,
  year =         "1974",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800277.809386",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:50:41 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The theory of policy driven schedulers (Ref. [1]) is
                 extended to cover cases in which the scheduling
                 parameters are allowed to adapt dynamically as the
                 system's job load varies. The system under
                 consideration offers batch, time sharing and limited
                 real time services. Data from simulated and live loads
                 are presented to evaluate both the static and the
                 adaptive schedulers. A policy driven scheduler makes
                 its decisions with respect to a set of policy
                 functions, fi(t). Each of the policy functions
                 corresponds to a different type of user and specifies
                 the amount of computing resources that the system will
                 try to give a user in that group within a given total
                 amount of elapsed time. It is found that the policy
                 functions must be set conservatively in order to avoid
                 response problems during periods of heavy load, but
                 that during more lightly loaded periods the
                 conservative settings result in widely disparate rates
                 of service to similar jobs. One solution is to vary the
                 policy functions as the job load changes. A dynamic
                 algorithm is presented that maintains responsiveness
                 during heavy loads and provides fairly uniform service
                 rates at other times.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
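
%%% Editorial note: a policy driven scheduler of the kind described
%%% above dispatches by comparing delivered service against the policy
%%% functions $f_i(t)$. The Python sketch below makes that decision
%%% rule concrete; the linear policy functions and all numbers are
%%% ours, not the paper's.

# Each class has a policy f_i(t): CPU seconds the system aims to
# deliver within t seconds of elapsed time. Dispatch the job whose
# delivered service lags its policy target the most.
def policy(rate):
    return lambda elapsed: rate * elapsed       # placeholder f_i(t) = rate*t

jobs = [  # (name, policy function, elapsed seconds, CPU seconds received)
    ("batch",       policy(0.05), 600.0, 40.0),
    ("timesharing", policy(0.20), 120.0, 20.0),
    ("realtime",    policy(0.50),  30.0, 10.0),
]

def deficit(job):
    name, f, elapsed, received = job
    return f(elapsed) - received                # positive = behind policy

chosen = max(jobs, key=deficit)
print("dispatch:", chosen[0], "deficit:", deficit(chosen))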

@Article{Merrill:1975:FCC,
  author =       "H. W. Barry Merrill",
  title =        "Further comments on comparative evaluation of {Kiviat}
                 graphs",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "1",
  pages =        "1--10",
  month =        jan,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041695.1041696",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:18 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Mike Morris has presented an excellent discussion in
                 these pages (1) of the use of Kiviat Graphs for
                 Computer Performance Evaluation, referencing another
                 fine article (2) which proposed a technique for
                 analytic comparisons (rankings) of these Graphs. Morris
                 also proposes that these techniques may be very useful
                 in describing system performance, and suggests a
                 different method for calculation of `Figures of Merit'
                 of Kiviat Graphs.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
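
%%% Editorial note: none of the competing figure-of-merit formulas is
%%% reproduced in the abstract above, but a common area-based variant
%%% conveys what is being debated: normalize each metric to [0, 1],
%%% invert the ``bad'' axes, and take the Kiviat polygon area relative
%%% to its maximum. The Python sketch below is generic, not Merrill's
%%% or Morris's formula.

import math

def kiviat_fom(values, bad_axes=()):
    # Invert "bad" metrics (overhead, idle time, ...) so larger = better.
    r = [1.0 - v if i in bad_axes else v for i, v in enumerate(values)]
    k = len(r)
    # Area of the polygon spanned by radii r on k equally spaced axes.
    area = 0.5 * math.sin(2.0 * math.pi / k) * sum(
        r[i] * r[(i + 1) % k] for i in range(k))
    max_area = 0.5 * math.sin(2.0 * math.pi / k) * k
    return area / max_area

# CPU busy, CPU/channel overlap, CPU only, channel only, idle.
print(kiviat_fom([0.8, 0.6, 0.3, 0.2, 0.1], bad_axes={2, 3, 4}))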

@Article{Stevens:1975:NFM,
  author =       "Barry A. Stevens",
  title =        "A note on figure of merit",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "1",
  pages =        "11--19",
  month =        jan,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041695.1041697",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:18 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Since Merrill proposed a Figure of Merit (FOM) for use
                 in interpretation of the Kiviat Graph (KG), the FOM has
                 found its way into at least one computer program to
                 plot those graphs, and has been the subject of further
                 discussion and amplification, and has had alternate
                 computation methods proposed and rebutted.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bell:1975:MCP,
  author =       "Thomas E. Bell",
  title =        "Managing computer performance with control limits",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "1",
  pages =        "21--28",
  month =        jan,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041695.1041698",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:18 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Dr. Bell received his doctorate in Operations
                 Management from the University of California at Los
                 Angeles in 1968. He immediately joined the Rand
                 Corporation as a Member of the Technical Staff in its
                 Computer Science Department and undertook research in
                 the simulation and performance improvement of computing
                 systems. During this research he participated in the
                 definition of the Extendable Computer System Simulator,
                 the development of a methodology for computer
                 performance improvement, and analysis of large,
                 multi-machine computer installations. He also analyzed
                 requirements for future command-and-control systems and
                 for logistic systems, in order to determine required
                 system functions and hardware size. He left Rand in
                 early 1974 to join the Software Research and Technology
                 Staff of TRW Systems Group where he is currently
                 developing improved techniques to specify the
                 requirements of computer software systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Browne:1975:AMP,
  author =       "J. C. Browne",
  title =        "An analysis of measurement procedures for computer
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "1",
  pages =        "29--32",
  month =        jan,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041695.1041699",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:18 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper purports to be a partial record of the
                 remarks made by the author at a panel session sponsored
                 by SIGMETRICS at the 1974 ACM National Conference in
                 San Diego. Not all of the material covered in the talk
                 is included here, primarily because it appears in other
                 contexts or in the presentations of other speakers.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Terplan:1975:COR,
  author =       "Kornel Terplan",
  title =        "Cost-optimal reliability of data processing systems",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "2",
  pages =        "1--12",
  month =        apr,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041701.1041702",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:24 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "With the advent of third generation computing systems,
                 the increase in complexity and power has reached a
                 degree which exceeds the human ability to understand,
                 to analyze, to predict, and to optimize system
                 performance and reliability. The only method that can
                 help is measurement. In defining measurement purposes,
                 one has to define which measurable quantities in the
                 system are significant and which may be ignored. But,
                 at the present time, we do not know in general what is
                 relevant in the measurements. For the sake of clarity,
                 it is useful to define several levels of measurement:
                 organizational level --- computer center level ---
                 computing system level --- job level --- computer
                 subsystem level.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Landwehr:1975:USM,
  author =       "Carl E. Landwehr",
  title =        "Usage statistics for {MTS}",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "2",
  pages =        "13--23",
  month =        apr,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041701.1041703",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:24 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The following report is presented in response to
                 Professor Browne's request for case studies of
                 performance measurement projects; this study takes a
                 macroscopic view of a large-scale time sharing and
                 batch processing installation.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Reddy:1975:EEM,
  author =       "Y. V. Reddy",
  title =        "Experimental evaluation of a multiprogrammed computer
                 system",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "2",
  pages =        "24--32",
  month =        apr,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041701.1041704",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:24 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper reports on the design and analysis of a
                 statistical experiment conducted on a `live' job stream
                 to determine the effect of segment size used for
                 storage allocation on the system performance.
                 Performance measures selected are turnaround time,
                 total cost and CPU utilization. The experiment consists
                 of one factor, the segment size, at five levels.
                 Uncontrolled factors such as EXCP's (number of I/O
                 starts) and core usage are included as covariates in
                 the analysis of variance. This experiment is part of a
                 continuing activity of Measurement, Evaluation and
                 Simulation. It is designed to provide data for
                 improving performance incrementally. The results of the
                 experiment provided an optimal segment size for the
                 given classing/scheduling algorithm and core-layout.
                 Design objectives and details of the analysis are also
                 presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bhandarkar:1975:PAM,
  author =       "Dileep P. Bhandarkar",
  title =        "A practical application of memory interference
                 models",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "2",
  pages =        "33--39",
  month =        apr,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041701.1041705",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:24 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper briefly describes an approximate Markov
                 chain model for memory interference in a multiprocessor
                 system like C.mmp. The modeling assumptions explain the
                 level of abstraction at which the analysis is carried
                 out. Some empirical measurements are presented to
                 determine the model parameters for C.mmp. The analytic
                 results obtained from the model are compared with some
                 measured and simulation results.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
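
%%% Editorial note: a classic first-order interference estimate gives
%%% context for the approximate Markov chain model above: with p
%%% processors each requesting one of m interleaved modules uniformly
%%% and independently per memory cycle, the expected number of busy
%%% modules is m * (1 - (1 - 1/m)^p). The Python sketch below
%%% tabulates this textbook estimate; it is not the paper's model.

def expected_busy_modules(p, m):
    # A module is idle iff none of the p processors picks it.
    return m * (1.0 - (1.0 - 1.0 / m) ** p)

for p in (2, 4, 8, 16):
    bw = expected_busy_modules(p, m=16)
    print(f"{p:2d} processors, 16 modules: {bw:5.2f} requests/cycle "
          f"({bw / p:.0%} of demand)")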

@Article{Bahr:1975:NFM,
  author =       "Dieter Bahr",
  title =        "A note on figures of merit",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "3",
  pages =        "1--3",
  month =        jul,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041707.1041708",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:31 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "There are different ways to compute figures of merit
                 (FOM). You may use Morris' [1] or Merrill's method [2]
                 or create any new one. But, in my opinion, that does
                 not answer the question whether these numbers are
                 nonsense or not.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Boehm:1975:ICP,
  author =       "B. W. Boehm and T. E. Bell",
  title =        "Issues in computer performance evaluation: some
                 consensus, some divergence",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "3",
  pages =        "4--39",
  month =        jul,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041707.1041709",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:31 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper summarizes the results of an ACM/NBS
                 Workshop on Computer Performance Evaluation. Computer
                 Performance Evaluation (CPE) was selected as the
                 subject of an ACM/NBS Workshop because of the
                 significant leverage CPE activities can have on
                 computer usage. This paper describes a number of
                 conclusions abstracted from the discussions as well as
                 presenting recommendations formally adopted by the
                 participants. While several of these conclusions
                 indicate that improvements are needed in performance
                 analysis tools, another suggests that improved
                 application of CPE could be achieved by better
                 documentation of analysis approaches. More integration
                 of data collection and modeling is considered
                 necessary for the performance analysis field to develop
                 its full potential. Participants noted that the common
                 emphasis on data collection or modeling, to the
                 exclusion of considering objectives, often seriously
                 degrades the value of performance analyses; the only
                 savings that really count from a performance analysis
                 are the ones that appear on the bottom line of the
                 balance sheet.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Barber:1975:BC,
  author =       "Eric Ole Barber and Arne Asphjell and Arve Dispen",
  title =        "Benchmark construction",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "4",
  pages =        "3--14",
  month =        oct,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041711.1041712",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:35 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A partially automated method of generating benchmarks
                 for comparison of EXEC 8 with other systems has been
                 developed as one step in preparation for choosing a new
                 computer at the University of Trondheim.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Marrevee:1975:MPP,
  author =       "J. P. Marrev{\'e}e",
  title =        "Measurements of the {Philips P1400} multiprogramming
                 system",
  journal =      j-SIGMETRICS,
  volume =       "4",
  number =       "4",
  pages =        "15--45",
  month =        oct,
  year =         "1975",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041711.1041713",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:35 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A number of performance measurements have been made on
                 a Philips P1000 computer under its Multiprogramming
                 System (MPS) in a business applications environment.
                 All measurements were collected by software monitoring
                 programs which were developed with the following
                 objectives in mind: general applicability; minimum
                 overhead; and, as much as possible, independence of
                 Monitor releases.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Wright:1976:AET,
  author =       "Linda S. Wright and William A. Burnette",
  title =        "An approach to evaluating time sharing systems:
                 {MH-TSS} a case study",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1",
  pages =        "8--28",
  month =        jan,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041715.1041716",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The authors conducted a benchmark measurement of the
                 Murray Hill Time Sharing System (MH-TSS) running on a
                 Honeywell 6000. The object of the test was to duplicate
                 the load normally present on the Murray Hill production
                 system, and measure the system's behavior before and
                 after a major software release and a major hardware
                 improvement. Five different load levels, from 30 to 90
                 users, were measured for each configuration. This paper
                 discusses the methods used in the design of the
                 experiment and in the analysis and interpretation of
                 the results. Several measurement tools were used in
                 this test. The event trace collection facility of
                 MH-TSS was used for the benchmark measurement and for
                 the design and fine tuning of a script representing the
                 normal load at Murray Hill. A commercially available
                 H6000-specific terminal simulator was used to feed
                 these scripts to the system. The batch background
                 system was loaded by a stream of synthetic jobs,
                 matched in resource usage characteristics to a set of
                 jobs chosen at random from the job stream of the
                 production system. The event trace data gathered at
                 various load levels under the three software and
                 hardware configurations were analyzed using two
                 techniques employing a state transition representation
                 of program behavior and system response. The result was
                 a set of data which documents the expected performance
                 improvements for the new software and hardware being
                 installed at Murray Hill, and which suggests the
                 expected growth potential for MH-TSS.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "event trace; monitoring; operating systems; queuing
                 networks; response time; state transition models",
}

@Article{Calcagni:1976:SRK,
  author =       "John M. Calcagni",
  title =        "Shape in ranking {Kiviat} graphs",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1",
  pages =        "35--37",
  month =        jan,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041715.1041717",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The purpose of this paper is to address the topic of
                 ranking or comparing Kiviat Graphs. Several articles
                 have appeared on the subject. For background
                 information the reader is directed to the original
                 article by Philip Kiviat and Kenneth Kolence (1) and to
                 the articles on ranking by Barry Merrill (2, 4) and
                 Michael Morris. The main emphasis here will be on
                 showing how automatic inclusion of axis-value
                 normalizations and hence of pattern normalization can
                 be achieved. It is hoped that this will be one way of
                 making the ranking of Kiviat Graphs more meaningful and
                 hence more useful. Pattern recognition is, after all,
                 one of the main reasons for using the Kiviat Graph
                 technique.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Eisenfeld:1976:IRH,
  author =       "J. Eisenfeld and David R. Barker and David J.
                 Mishelvich",
  title =        "Iconic representation of the human face with computer
                 graphics",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1",
  pages =        "38--39",
  month =        jan,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041715.1041718",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "There are many applications for the iconic
                 representation of the human face. The program discussed
                 here was designed to describe the face by means of
                 measurements made on a skeletal radiograph and, in
                 particular, could be used to indicate changes resulting
                 from oral surgery. The computer generated faces are
                 drawn using a program modified by the authors which was
                 produced and kindly given to us by Mr Robert Jacob and
                 Dr William H. Huggins of the Johns Hopkins University.
                 Their program was based on that developed by Dr Herman
                 Chernoff (1) of Stanford University. The program was
                 originally designed for the presentation of
                 multivariate statistical data and was modified by Jacob
                  and Huggins for use in iconic communication. As a
                  result of our modifications, the mouth, nose, and
                  facial outline are presented more realistically, and
                  the data input is interactive and quicker, especially
                  when only a few variables change; the input variables
                  are more directly related to facial components, which
                  facilitates accuracy in drawing.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Nutt:1976:TCS,
  author =       "Gary J. Nutt",
  title =        "Tutorial: computer system monitors",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1",
  pages =        "41--51",
  month =        jan,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041715.1041719",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The most important questions to be answered before
                 attempting to monitor a machine are {\em what\/} to
                 measure and {\em why\/} the measurement should be
                 taken. There is no general answer to these questions,
                 although a comprehensive set of considerations has been
                 discussed elsewhere. The following example indicates
                 some of the considerations involved. Suppose one is
                 interested in tuning a medium scale system which
                 utilizes virtual memory to support a batch
                 multiprogramming strategy. The nature of the job load
                 is a major factor in determining system performance;
                 the mix may be monopolized by I/O-bound jobs which use
                 very little processor time. In this case, the
                 bottleneck might be the mass storage system or the
                 peripheral devices. Resource utilization of the
                 peripheral devices may indicate bottlenecks at that
                 point; high mass storage utilization may not be
                 attributable only to the I/O operations, but may be
                 significantly influenced by the virtual memory
                 replacement policy. Processor utilization in this
                 system is also an insufficient measure for most
                 purposes, since the overhead time for spooling,
                 multiprogramming, and virtual memory may be unknown. A
                 more useful measurement for operating system policy
                 studies would quantify processor utilization for the
                 user as well as for each function of interest in the
                 operating system. From this example, one can see that
                 the variety of evaluation objectives and computer
                 systems causes the determination of what and why to be
                 largely a heuristic problem.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Cotton:1976:SFP,
  author =       "Ira W. Cotton",
  title =        "Some fundamentals of price theory for computer
                 services",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "1--12",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Giammo:1976:DCP,
  author =       "Thomas Giammo",
  title =        "Deficiencies in computer pricing structure theory",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "13--21",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kimbleton:1976:CPD,
  author =       "Stephen R. Kimbleton",
  title =        "Considerations in pricing distributed computing",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "22--30",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kiviat:1976:BRG,
  author =       "Philip J. Kiviat",
  title =        "A brief review of the {GAO} task group's
                 recommendations on management guidelines for pricing
                 computer services in the federal government",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "71--83",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Morris:1976:PIP,
  author =       "Michael F. Morris",
  title =        "Problems in implementing and processing computer
                 charging schemes",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "84--88",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041739.1041744",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "It is important to point out at the beginning of this
                 presentation that we have strayed quite far from the
                 titled topic of our workshop --- `Pricing Computer
                 Services.' This makes my task much easier because I'm
                 not at all sure what `service' we get from computers
                 and `pricing' is seldom related in any economic sense
                 with the cost of production. Here we have really been
                 discussing `Charging for Computer Resource Usage.' I
                 will stay with the topic as we've been discussing it
                 rather than with the topic as I thought it should be.
                  To make the distinction clear between pricing services
                 and charging for resource usage I will relate a very
                 simple story from a recent newspaper.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Luderer:1976:CPM,
  author =       "Gottfried W. R. Luderer",
  title =        "Charging problems in mixed time-sharing\slash batch
                 systems: cross subsidization and invariant work units",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "89--93",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041739.1041745",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper discusses two topics related to charging
                 for computing services in mixed timesharing/batch
                 systems. The first one is the problem of cross
                 subsidization between time-sharing and batch service. A
                 method is proposed which helps to avoid this
                 phenomenon. The second topic deals with the question of
                 helping the user to divide his work between
                 time-sharing and batch service based on charging
                 information. Basically, the approach is to define a
                 service-invariant computing work unit, which is priced
                 differently according to grade of service. Time-sharing
                 and batch are considered to be different grades of
                 service. The cost impact of moving work between
                 services can thus be more easily estimated. A method
                 for calculating grade-of-service factors from cost and
                 workload estimates is presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
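
%%% A minimal sketch of how grade-of-service factors might be
%%% calculated from cost and workload estimates, as the entry above
%%% describes: one invariant work unit is priced at a common base
%%% rate, and each grade's factor is set so that the grade recovers
%%% its own cost (avoiding cross subsidization). The grade names and
%%% figures below are hypothetical, not taken from the paper.
%%%
%%%   # Sketch (Python): grade-of-service factors from cost and
%%%   # workload estimates. All figures are hypothetical.
%%%   grades = {
%%%       "time_sharing": {"cost": 600_000.0, "work_units": 1_500_000.0},
%%%       "batch":        {"cost": 300_000.0, "work_units": 1_500_000.0},
%%%   }
%%%
%%%   # Common base rate per invariant work unit across all grades.
%%%   total_cost = sum(g["cost"] for g in grades.values())
%%%   total_work = sum(g["work_units"] for g in grades.values())
%%%   base_rate = total_cost / total_work
%%%
%%%   # Factor > 1 marks a more expensive grade of service.
%%%   factors = {name: (g["cost"] / g["work_units"]) / base_rate
%%%              for name, g in grades.items()}
%%%
%%%   # Cost impact of moving 100 work units from time-sharing
%%%   # to batch, which the paper argues becomes easy to estimate:
%%%   delta = 100 * base_rate * (factors["batch"]
%%%                              - factors["time_sharing"])
%%%   print(factors, delta)
%%%
%%% With these numbers the factors come out to 4/3 and 2/3, and the
%%% move saves 20 currency units, illustrating how a service-invariant
%%% unit makes such comparisons direct.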

@Article{Oatey:1976:STM,
  author =       "David J. Oatey",
  title =        "{SIGMETRICS} technical meeting on pricing computer
                 services",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "94--102",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041739.1041746",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This presentation will show how one large installation
                 actually does pricing of several on-line systems. This
                 is a `pricing in practice' example with the resultant
                 procedures, measures, and pricing determined by the
                 blending of several practical, political, and
                 theoretical influences.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gutsche:1976:UE,
  author =       "Richard H. Gutsche",
  title =        "User experience",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "103--107",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041739.1041747",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Security Pacific is the tenth largest bank in the
                 United States, operating 500 banking locations in the
                 State of California. Our Electronic Data Processing
                 Department serves the entire system from its Glendale
                 Operations Center and a satellite center in Hayward.
                 The Hayward location serves as an input/output center
                 for our Northern California banking offices. Data
                 Transmission provides for centralization of all
                 accounting functions.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Anonymous:1976:PC,
  author =       "Anonymous",
  title =        "Participant's choice",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "1c",
  pages =        "108--122",
  month =        mar,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041739.1041748",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:47 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "During these two sessions, chaired by Richard Gutsche
                 of Security Pacific National Bank, a panel of experts
                 addressed specific pricing problems the participants
                 and attendees felt were important. The preliminary
                  questions that the panelists addressed included: $
                  \bullet $ What should be included in an overhead charge
                  and why? $ \bullet $ Should a computer center be
                  price-competitive with an outside market? $ \bullet $
                  Funding a computer center --- real or funny money? $
                  \bullet $ What is an appropriate charging philosophy
                  for a paging environment?",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Luderer:1976:DCR,
  author =       "Gottfried W. R. Luderer",
  title =        "Defining a computer resource unit",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "2",
  pages =        "5--10",
  month =        apr,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041721.1041722",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A method for the construction of a resource component
                 charging formula for computer service in a
                 multiprogramming system is defined. Charges are
                 proportional to relative resource costs, to fractional
                 resource use with regard to total expected resource
                 usage, and the intent is to recover cost without profit
                 or loss. Further, a method is presented that simplifies
                 the treatment of overhead or unallocatable resource
                 costs. An aggregate `Computer Resource Unit' is
                 defined, which attempts to characterize workload in a
                 system-invariant way. Experiences with this concept and
                 its limitations are discussed. Recommendations for
                 those planning to introduce a similar concept are
                 given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "computer charging; overhead allocation; virtual time;
                 workload characterization",
}
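
%%% A minimal sketch of a resource-component charging formula of the
%%% kind the entry above defines: each resource unit is priced so that
%%% expected revenue equals budgeted cost (recovery without profit or
%%% loss), with overhead spread over the allocatable resources in
%%% proportion to their direct costs, which is one simple reading of
%%% the overhead treatment described. Resource names and figures are
%%% hypothetical.
%%%
%%%   # Sketch (Python): cost-recovery resource charging.
%%%   resources = {
%%%       "cpu_seconds": {"cost": 400_000.0, "expected_use": 2.0e6},
%%%       "disk_ios":    {"cost": 150_000.0, "expected_use": 5.0e7},
%%%       "print_lines": {"cost":  50_000.0, "expected_use": 1.0e7},
%%%   }
%%%   overhead_cost = 100_000.0   # unallocatable costs
%%%
%%%   # Load overhead onto resources in proportion to direct cost,
%%%   # then price each unit to recover the loaded cost exactly.
%%%   direct_total = sum(r["cost"] for r in resources.values())
%%%   unit_price = {}
%%%   for name, r in resources.items():
%%%       loaded = r["cost"] * (1.0 + overhead_cost / direct_total)
%%%       unit_price[name] = loaded / r["expected_use"]
%%%
%%%   def charge(usage):
%%%       """Charge for one job from its per-resource usage."""
%%%       return sum(unit_price[k] * v for k, v in usage.items())
%%%
%%%   print(charge({"cpu_seconds": 12.0, "disk_ios": 900,
%%%                 "print_lines": 200}))
%%%
%%% If actual usage matches the expected totals, revenue equals direct
%%% cost plus overhead by construction.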

@Article{Roehr:1976:PIT,
  author =       "K. Roehr and K. Niebel",
  title =        "Proposal for instruction time objectives",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "2",
  pages =        "11--18",
  month =        apr,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041721.1041723",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The designer of an instruction processing unit is
                  generally faced with the problem of implementing a
                  machine able to execute a given instruction set
                  within given timing and cost constraints. A very
                  common method of stating instruction timing
                  constraints is by means of an average instruction
                  time.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Collins:1976:PIC,
  author =       "John P. Collins",
  title =        "Performance improvement of the {CP-V} loader through
                 use of the {ADAM} hardware monitor",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "2",
  pages =        "63--67",
  month =        apr,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041721.1041724",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The ADAM hardware monitor can be used to localize and
                 identify several types of performance-impairing
                 behavior in user programs. This paper presents a case
                 study for such an improvement carried out on the CP-V
                 overlay loader. Through measurement of the execution
                 behavior and the subsequent analysis of the resulting
                 data, problems of three basic types were identified: 1.
                 The presence of inefficiently coded routines in areas
                 of high execution intensity; 2. The use of overly
                 general routines along heavily-used program paths; and
                 3. The use of inefficient algorithms for processing the
                 large amounts of data with which the loader deals. The
                 subsequent redesign and recoding of the problem areas
                 have resulted in a significant performance improvement:
                 the time required to load a program has been reduced by
                  a factor of between two and ten, depending upon the
                 nature of the program and the loader options
                 specified.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Brandwajn:1976:SLI,
  author =       "A. Brandwajn",
  title =        "Simulation of the load of an interactive system",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "2",
  pages =        "69--92",
  month =        apr,
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041721.1041725",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We describe a simulator of interactive users designed
                 for the resource sharing system ESOPE. We stress the
                 guide-lines of the design as well as the problems of
                 interface with the operating system, of measurements,
                 and of perturbations caused by the simulator in the
                 statistics gathered. We show two examples of an
                 application of the simulator to the design of a
                 resource-sharing system, viz., to an analysis of load
                 regulation policies, and to an evaluation of the
                 improvement in system performance one may expect from
                 implementing shared translators. Finally, we use the
                 load simulator to validate a mathematical model. The
                 latter is developed by step-wise refinement, using
                 measured values of model parameters, till a good
                 agreement between the performance indices computed from
                 our model and those measured in a real system under
                  simulated load is obtained. It is observed that, for
                 most of the performance measures considered, a simple
                 model matches fairly well the `real world'.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Coppens:1976:QER,
  author =       "G. W. J. Coppens and M. P. F. M. van Dongen and J. P.
                 C. Kleijnen",
  title =        "Quantile estimation in regenerative simulation: a case
                 study",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "3",
  pages =        "5--15",
  month =        "Summer",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041727.1041728",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:59 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We model key-punching in a computer center as a
                 queuing simulation with 2 servers (typists) and 3
                 priority classes (small, medium, large jobs). The 90\%
                 quantile of queuing time is estimated for different
                 borderlines between the 3 job classes. Confidence
                 intervals for the quantiles are based on the
                 regenerative properties of the simulation, as derived
                 by Iglehart (1974). They utilize the asymptotic
                 normality of the estimated quantile, and a rather
                 complicated expression for its variance. Numerical
                 results are given for the quantiles (and averages) of
                 the queuing times in each job class, for several
                 borderlines between the 3 job classes. The effects of
                 simulation runlength on the confidence intervals were
                 also examined. The effects of varying job-class
                 borderlines were tentatively modeled by a regression
                 model.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
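
%%% The confidence intervals in the entry above rest on the
%%% regenerative structure of the simulation. The sketch below is a
%%% deliberately simplified stand-in: it groups whole regenerative
%%% cycles of an M/M/1 queue (a hypothetical workload, not the
%%% two-typist priority system of the paper) into batches and applies
%%% a normal approximation to batch quantile estimates, rather than
%%% Iglehart's exact variance expression.
%%%
%%%   # Sketch (Python): quantile estimate plus a batched CI over
%%%   # regenerative cycles. Simplified; not the paper's estimator.
%%%   import math, random
%%%
%%%   def simulate_cycle(rng, lam=0.5, mu=1.0):
%%%       """Waiting times in one regenerative cycle of an M/M/1
%%%       queue: starts with a customer who finds the system empty,
%%%       ends just before the next such customer."""
%%%       waits, w = [0.0], 0.0
%%%       while True:
%%%           w = max(w + rng.expovariate(mu)
%%%                     - rng.expovariate(lam), 0.0)
%%%           if w == 0.0:
%%%               return waits
%%%           waits.append(w)
%%%
%%%   def quantile_ci(cycles, q=0.9, nbatch=20, z=1.96):
%%%       """Point estimate from all data; CI by batching cycles."""
%%%       allw = sorted(w for c in cycles for w in c)
%%%       point = allw[int(q * len(allw))]
%%%       k = len(cycles) // nbatch
%%%       ests = []
%%%       for b in range(nbatch):
%%%           batch = sorted(w for c in cycles[b*k:(b+1)*k]
%%%                            for w in c)
%%%           ests.append(batch[int(q * len(batch))])
%%%       mean = sum(ests) / nbatch
%%%       var = sum((e - mean) ** 2 for e in ests) / (nbatch - 1)
%%%       half = z * math.sqrt(var / nbatch)
%%%       return point, (mean - half, mean + half)
%%%
%%%   rng = random.Random(1)
%%%   print(quantile_ci([simulate_cycle(rng) for _ in range(10000)]))
%%%
%%% Batching whole cycles (never splitting one) is what keeps the
%%% batch estimates independent and identically distributed.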

@Article{Estell:1976:HFRa,
  author =       "Robert G. Estell",
  title =        "How fast is `real-time'?",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "3",
  pages =        "16--18",
  month =        "Summer",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041727.1041729",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:59 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A single bench mark test was compiled and run on the
                 AN/UYK-7 computer, and on a number of commercial
                 computers, in order to measure the relative throughput
                 of the UYK-7, which is the Navy's large scale real-time
                 computer. The results indicate the speeds and
                 accuracies of each host; however, general conclusions
                 can be drawn only with some risk.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Mills:1976:SMC,
  author =       "Philip M. Mills",
  title =        "A simple model for cost considerations in a batch
                 multiprocessor environment",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "3",
  pages =        "19--27",
  month =        "Summer",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041727.1041730",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:51:59 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes a simple model which provides a
                 procedure for estimating the effect of additional
                 hardware on run time. The additional hardware may be
                 additional processors, more powerful processors, an
                 increase in memory size or additional memory modules.
                 Run time is related to cost effectiveness. A measure of
                 memory interference in the form of effective processing
                 power is determined for multiprocessors and used in the
                 formulation of run time. The overall procedure allows
                 the user to compare different multiprocessor hardware
                 configurations on a cost effective basis.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
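
%%% The entry above turns a memory-interference measure into run time
%%% and cost comparisons. The sketch uses a standard interference
%%% approximation (n processors each addressing one of m independent
%%% memory modules uniformly at random per cycle); it is illustrative
%%% and not necessarily the exact measure of the paper, and all cost
%%% figures are hypothetical.
%%%
%%%   # Sketch (Python): effective processing power and cost/run-time
%%%   # comparison across multiprocessor configurations.
%%%   def effective_power(n_proc, n_mem):
%%%       """Expected busy processors per memory cycle under the
%%%       uniform random-access approximation."""
%%%       return n_mem * (1.0 - (1.0 - 1.0 / n_mem) ** n_proc)
%%%
%%%   def run_time(work, n_proc, n_mem, speed=1.0):
%%%       """Run time as total work over effective power."""
%%%       return work / (effective_power(n_proc, n_mem) * speed)
%%%
%%%   configs = [   # (processors, memory modules, monthly cost)
%%%       (1, 2, 10_000.0), (2, 2, 17_000.0),
%%%       (2, 4, 20_000.0), (4, 4, 32_000.0),
%%%   ]
%%%   for n, m, cost in configs:
%%%       t = run_time(1.0e6, n, m)
%%%       print(n, m, round(t), round(cost * t))
%%%
%%% The cost * time product gives the cost-effectiveness ranking the
%%% abstract describes: adding processors raises effective power less
%%% than linearly once memory interference sets in.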

@Article{Buchanan:1976:IBM,
  author =       "Irene Buchanan and David A. Duce",
  title =        "An interactive benchmark for a multi-user minicomputer
                 system",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "4",
  pages =        "5--17",
  month =        "Fall",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041732.1041733",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:04 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The work that forms the basis for this paper was
                 undertaken as part of an exercise to purchase two
                 multi-user minicomputer systems to be developed as
                 interactive facilities for grant holders supported by
                 the Engineering Board of the United Kingdom Science
                 Research Council.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Estell:1976:HFRb,
  author =       "Robert G. Estell",
  title =        "How fast is `real-time'?",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "4",
  pages =        "18--20",
  month =        "Fall",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041732.1041734",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:04 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A single bench mark test was compiled and run on the
                 AN/UYK-7 computer, and on a number of commercial
                 computers, in order to measure the relative throughput
                 of the UYK-7, which is the Navy's large scale real-time
                 computer. The results indicate the speeds and
                 accuracies of each host; however, general conclusions
                 can be drawn only with some risk.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Rafii:1976:SPR,
  author =       "Abbas Rafii",
  title =        "Study of the performance of {RPS}",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "4",
  pages =        "21--38",
  month =        "Fall",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041732.1041735",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:04 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The objective of this study is to evaluate the impact
                 of RPS (Rotational Position Sensing) on the response
                 time and utilization of multiple spindle disk drives
                 with a shared channel. Simulation models are used to
                  compare the effectiveness of the RPS scheme with that of
                  systems without RPS capability. Analytical models for
                 the number of RPS rotation misses and the utilization
                 of the channel at the saturation point are given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
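
%%% A back-of-envelope version of the RPS delay analysis sketched in
%%% the entry above, under the simplifying assumption that each
%%% reconnection attempt independently finds the shared channel busy
%%% with probability p, costing one full extra rotation per miss.
%%% This is an illustration, not the paper's simulation or its exact
%%% analytical model; the timing figures are hypothetical.
%%%
%%%   # Sketch (Python): mean disk service time with RPS misses.
%%%   def expected_rps_misses(p_busy):
%%%       """Mean missed reconnections: geometric retries."""
%%%       return p_busy / (1.0 - p_busy)
%%%
%%%   def mean_service_ms(seek, rotation, transfer, p_busy):
%%%       latency = rotation / 2.0          # average rotational wait
%%%       rps = expected_rps_misses(p_busy) * rotation
%%%       return seek + latency + rps + transfer
%%%
%%%   # 3600 rpm drive: one rotation is about 16.7 ms.
%%%   for p in (0.1, 0.3, 0.5, 0.7):
%%%       print(p, round(mean_service_ms(25.0, 16.7, 5.0, p), 1))
%%%
%%% The p/(1-p) term is why RPS devices on a shared channel degrade
%%% sharply as channel utilization climbs toward saturation.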

@Article{Price:1976:CQN,
  author =       "Thomas G. Price",
  title =        "A comparison of queuing network models and
                 measurements of a multiprogrammed computer system",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "4",
  pages =        "39--62",
  month =        "Fall",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041732.1041736",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:04 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Although there has been a substantial amount of work
                 on analytical models of computer systems, there has
                 been little experimental validation of the models. This
                 paper investigates the accuracy of the models by
                 comparing the results calculated using analytical
                 models with measurements of an actual system. Models
                 with and without overlapped seeks are compared. Also,
                 we show how a model can be used to help interpret
                 measurements of a real system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "analytical models; performance measurement and
                 evaluation; queuing networks",
}
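
%%% Closed central-server models of the kind validated in the entry
%%% above can be evaluated exactly. The sketch uses Mean Value
%%% Analysis, a standard technique for such product-form networks
%%% that postdates this paper (Reiser and Lavenberg, 1980); the
%%% service demands are hypothetical.
%%%
%%%   # Sketch (Python): exact MVA for a closed queueing network.
%%%   def mva(demands, n_jobs, think=0.0):
%%%       """demands: per-device service demand (visits x service
%%%       time). Returns (throughput, mean queue lengths)."""
%%%       q = [0.0] * len(demands)
%%%       for n in range(1, n_jobs + 1):
%%%           # Residence time seen by an arriving job at each device.
%%%           resid = [d * (1.0 + qi) for d, qi in zip(demands, q)]
%%%           x = n / (think + sum(resid))     # system throughput
%%%           q = [x * r for r in resid]       # Little's law
%%%       return x, q
%%%
%%%   # CPU plus two disks, five jobs in the multiprogramming mix.
%%%   x, q = mva(demands=[0.040, 0.030, 0.025], n_jobs=5)
%%%   print(round(x, 2), [round(v, 2) for v in q])
%%%
%%% Comparing such computed throughputs and queue lengths against
%%% measured ones is exactly the style of validation the paper
%%% undertakes, including variants with and without overlapped seeks.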

@Article{Buzen:1976:TTT,
  author =       "J. P. Buzen",
  title =        "Tuning: tools and techniques",
  journal =      j-SIGMETRICS,
  volume =       "5",
  number =       "4",
  pages =        "63--81",
  month =        "Fall",
  year =         "1976",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041732.1041737",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:04 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Tuning is basically a two stage process: the first
                 stage consists of detecting performance problems within
                 a system, and the second stage consists of changing the
                 system to correct these problems. Measurement tools
                 such as hardware monitors, software monitors and
                 accounting packages are typically used in the first
                 stage, and tools such as optimizers, simulators and
                 balancers are sometimes used in the second stage.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Spiegel:1977:WSA,
  author =       "Mitchell G. Spiegel",
  title =        "Workshop summary: `Applications of queuing models to
                 {ADP} system performance prediction'",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "1",
  pages =        "13--33",
  month =        "Winter",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1044829.1044830",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:12 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A workshop was held on the Applications of Queuing
                 Models to ADP System Performance Prediction on 7-8
                 March 1977 at the National Technical Information
                 Service in Springfield, VA. Topics were divided into
                 four general areas: (1) Application of Queuing Models
                 to Feasibility and Sizing Studies, (2) Application of
                 Queuing Models to System Design and Performance
                 Management, (3) Queuing Model Validation and (4) New
                 Queuing Model Implementations. Mr Philip J. Kiviat,
                 Chairman, SIGMETRICS, made the welcoming remarks. As
                 Workshop Chairman, I provided a historical overview of
                 queuing model use which traced the development of the
                 application of queuing models to ADP system performance
                 prediction through the 20th century, while setting the
                 stage for each speaker's talk.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hellerman:1977:TWF,
  author =       "L. Hellerman",
  title =        "A table of work formulae with derivations and
                 applications",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "1",
  pages =        "35--54",
  month =        "Winter",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1044829.1044831",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:12 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Formulae for the work of certain common simple
                 computational steps are derived. The evaluation is in
                 terms of an information theoretic measure. The results
                 are then applied to evaluate the work of multiplication
                 and division, and the work of the IBM S/370 branch and
                 link instruction.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
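
%%% The entry above derives work formulae from an information
%%% theoretic measure. The sketch below computes one such quantity,
%%% the entropy of the partition a function induces on a uniform
%%% input space, purely as an illustration of the style of
%%% measurement; it does not claim to reproduce the paper's table of
%%% formulae.
%%%
%%%   # Sketch (Python): entropy of the output partition of a
%%%   # finite function under uniformly random inputs.
%%%   import math
%%%   from collections import Counter
%%%
%%%   def partition_entropy_bits(f, domain):
%%%       """H = -sum_y p_y log2 p_y, with p_y the fraction of
%%%       inputs mapped to output y."""
%%%       counts = Counter(f(x) for x in domain)
%%%       n = len(domain)
%%%       return -sum((c / n) * math.log2(c / n)
%%%                   for c in counts.values())
%%%
%%%   dom = range(16)          # all 4-bit inputs
%%%   print(partition_entropy_bits(lambda x: x, dom))   # 4.0 bits
%%%   print(partition_entropy_bits(                     # ~0.81 bits
%%%       lambda x: (x & 1) & ((x >> 1) & 1), dom))
%%%
%%% By this yardstick an identity mapping preserves all four bits
%%% while a single AND of two input bits yields far less information,
%%% the intuition behind work comparisons of operations such as
%%% multiply, divide, and branch-and-link.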

@Article{Allen:1977:NES,
  author =       "R. C. Allen and S. R. Clark",
  title =        "A note on an empirical study of paging on an {IBM
                 370\slash 145}",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "1",
  pages =        "55--62",
  month =        "Winter",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1044829.1044832",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:12 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A summary is presented of the paging activity observed
                 for various programs executing on a System/370 model
                  145 using OS/VS1 (Release 2.0). Paging activity was
                 measured by periodic sampling of the queues involved in
                 real storage page management and by inspection of page
                 traffic counters maintained by the operating system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Morrison:1977:ASC,
  author =       "Robert L. Morrison",
  title =        "Abstracts from the 1977 {SIGMETRICS\slash CMG VIII}
                 conference",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "2",
  pages =        "3--21",
  month =        "Spring",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041750.1041751",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:18 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lazos:1977:FDW,
  author =       "Constantine Lazos",
  title =        "Functional distribution of the workload of a linked
                 computer system and its simulation",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "3",
  pages =        "5--14",
  month =        "Summer",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041753.1041754",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:19 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Consideration is given to a possible functional
                  distribution of the workload over two linked
                  computers with separate channel access to a large
                  disc store, and to the resource utilisation achieved
                  by the linked system, investigated by simulation
                  using a modified and re-entrant single processor
                  simulator. Results suggest that the proposed
                  distribution realises a high utilisation.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "compilation; disc channel traffic; hardware
                 utilisation; I/O buffers; in process; linked computer
                 system; multiprocessing; out process; simulation; trace
                 driven; work load",
}

@Article{Scheer:1977:COM,
  author =       "A.-W. Scheer",
  title =        "Combination of an optimization model for hardware
                 selection with data determination methods",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "3",
  pages =        "15--26",
  month =        "Summer",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041753.1041755",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:19 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The selection of an EDP configuration often fixes a
                 firm to a single manufacturer for a long time and the
                 capabilities of the chosen computer will continually
                  influence the firm's organization. Only a few
                  approaches exist that assist investors by developing
                  useful decision models based on investment theory
                  /11, 12/. The hardware selection methods /4, 13/ used
                  up to now, like benchmark tests, do not meet these
                  demands. In this paper an investment model based on
                 mathematical programming is developed which considers
                 the aspects of investment for hardware selection.
                 Nevertheless, the present methods stay valid because
                 their output can be used as delta input for the
                 optimization model. Therefore, a concept is proposed
                 which combines these methods with an optimization
                 model.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
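
%%% The investment model in the entry above is a mathematical
%%% program into which benchmark outputs feed as data. The sketch
%%% below shrinks that idea to a 0-1 selection solved by enumeration:
%%% pick the configuration set maximizing net value subject to a
%%% workload-capacity constraint, with benchmark-derived capacities
%%% as input. All names and figures are hypothetical.
%%%
%%%   # Sketch (Python): tiny 0-1 hardware-selection model.
%%%   from itertools import product
%%%
%%%   candidates = [  # (name, benchmark capacity, annual cost)
%%%       ("system_A",  120.0, 300_000.0),
%%%       ("system_B",  200.0, 450_000.0),
%%%       ("upgrade_B",  80.0, 120_000.0),  # needs system_B
%%%   ]
%%%   required, value_per_unit = 250.0, 2_500.0
%%%
%%%   best = None
%%%   for bits in product((0, 1), repeat=len(candidates)):
%%%       if bits[2] and not bits[1]:   # upgrade requires its base
%%%           continue
%%%       cap = sum(b * c[1] for b, c in zip(bits, candidates))
%%%       cost = sum(b * c[2] for b, c in zip(bits, candidates))
%%%       if cap < required:
%%%           continue
%%%       npv = value_per_unit * min(cap, required) - cost
%%%       if best is None or npv > best[0]:
%%%           best = (npv, bits)
%%%   print(best)
%%%
%%% Real instances would use integer programming rather than
%%% enumeration, but the division of labour is the one the paper
%%% proposes: existing selection methods supply the data, and the
%%% optimization model makes the investment decision.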

@Article{Berinato:1977:AMT,
  author =       "Terence Berinato",
  title =        "An analytical model of a teleprocessing system",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "3",
  pages =        "27--32",
  month =        "Summer",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041753.1041756",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:19 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A queuing model has been developed to study the
                 performance and capacity of a casualty insurance
                 teleprocessing system. This paper presents the salient
                 features of the system itself, relates those features
                 to basic queuing theory algorithms, outlines the basic
                 model construction, and discusses the validation
                 results.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
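
%%% The capacity model in the entry above rests on basic queuing
%%% theory results. As a minimal illustration of the kind of formula
%%% involved, the sketch evaluates a single M/M/1 resource; the
%%% paper's actual model structure and parameters are not reproduced
%%% here, and the figures are hypothetical.
%%%
%%%   # Sketch (Python): M/M/1 utilization, response time, queue.
%%%   def mm1(arrival_rate, service_time):
%%%       rho = arrival_rate * service_time
%%%       if rho >= 1.0:
%%%           raise ValueError("resource saturated")
%%%       resp = service_time / (1.0 - rho)  # mean time in system
%%%       return rho, resp, arrival_rate * resp  # Little's law
%%%
%%%   # Transactions/s against a 120 ms resource: response time
%%%   # grows without bound as utilization approaches one.
%%%   for tps in (2, 4, 6, 8):
%%%       rho, r, n = mm1(tps, 0.120)
%%%       print(tps, round(rho, 2), round(r * 1000), round(n, 1))
%%%
%%% Plotting such curves against transaction volume is how a queuing
%%% model turns into a capacity statement for a teleprocessing
%%% system.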

@Article{Chanson:1977:SSA,
  author =       "Samuel T. Chanson and Craig D. Bishop",
  title =        "A simulation study of adaptive scheduling policies in
                 interactive computer systems",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "3",
  pages =        "33--39",
  month =        "Summer",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041753.1041757",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:19 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Recently, some work has been done in the area of
                 dynamically adaptive scheduling in operating systems
                 (i.e., policies that will adjust to varying workload
                 conditions so as to maximize performance) [4],[5],
                 [10], [11]. However, most studies deal with
                 batch-oriented systems only. The University of British
                 Columbia operates an IBM 370/168 running under MTS
                 (Michigan Terminal System) which is principally used
                 interactively. It has been known for some time that the
                 system is Input/Output bound. The main goal of this
                 work is to determine to what extent adaptive control,
                 particularly as related to processor scheduling, can
                  improve performance in a system similar to U.B.C.'s.
                 Simulation is used throughout the study and because of
                 this, the simulator and the workload are described in
                 some detail. The target machine is a somewhat
                 simplified version of the U.B.C. System.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ziegler:1977:DST,
  author =       "Kurt Ziegler",
  title =        "A data sharing tutorial",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "4",
  pages =        "3--7",
  month =        "Fall",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041759.1041760",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:26 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This tutorial is intended to acquaint the reader with
                  the issues of DATA SHARING and to develop an
                  understanding of the implications of such facilities
                  for integrity, performance, and recovery.
                 Some future concerns are also discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Scott:1977:PDP,
  author =       "Shirley E. Scott",
  title =        "Pricing {D.P.} products: a timesharing
                 implementation",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "4",
  pages =        "8--12",
  month =        "Fall",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041759.1041761",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:26 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Periodically, vending Data Processing organizations
                  are faced with the task of establishing service rates
                  for the resources provided to customers. Sigmetrics'
                 Technical Meeting on Pricing Computer Services
                 (November, 1975) is a good indicator of the amount and
                 variety of interest the topic generates. The
                 proceedings from that meeting were a key source of
                 reference for the formulation and implementation of a
                 pricing strategy and automated model in one of Xerox's
                 timesharing data centers.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sarzotti:1977:TTS,
  author =       "Alain Sarzotti",
  title =        "Transactional terminal system on micro-processor: a
                 method for identifying \& modeling overall
                 performance",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "4",
  pages =        "13--22",
  month =        "Fall",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041759.1041762",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:26 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A typical banking, financial and administrative system
                 involves specific characteristics: a large number of
                 devices around a processor, with several different
                 kinds of work stations (displays, keyboards, printers,
                 badge and document readers \ldots{}), a heterogeneous
                 workload (by linkage of specialized micro-transactions
                 using local or remote files), versatile operating
                 facilities on displays for untrained administrative
                 personnel (form-loading on the display, selecting key
                 words, spotting errors, generating operational messages
                 \ldots{}), and working with several sets of typical
                 functions (savings operations, cheque accounting, fund
                 transfer, deposits, withdrawals, and mainly data
                  entry). In this case it was mandatory to approach the
                 system performance evaluation study by first building
                 and observing a typical workload model in the forecast
                 operating environment. Measurement steps were then
                 scheduled from outside to inside operating procedures
                 to get analysis from the user's point of view (a bank
                 teller's operations, for example). Then, overall
                 performance results were derived by direct measurement,
                 which established relationships between throughput,
                 response time, processor overhead, and space and time
                 parameters related to system behavior. That was done by
                 progressively increasing the number of terminals and
                 exercising the workload on two levels of technical and
                 functional saturation. Simultaneously, a simulation
                 model used the same description of the workload, and
                 after validation with the preceding direct measurement
                 results, was used to extend the previous relationships
                 on various systems. (The full range of Erlang
                 distribution parameters is assumed with unknown
                 servers; the trace-driven method was not possible.) The
                 final results are shown in tables and charts which
                 exhibit system boundaries, providing useful guidelines
                 for designing network stations and performing workload
                 forecasting.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bazewicz:1977:UMP,
  author =       "Mieczyslaw Bazewicz and Adam Peterseil",
  title =        "Use of modelling in performance evaluation of computer
                 systems: a case of installations in the {Technical
                 University of Wroclaw}",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "4",
  pages =        "22--26",
  month =        "Fall",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041759.1041763",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:26 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "There is a number of models of user behaviour applied
                 in modelling studies on computer system performance
                 predictions. The models in most cases can be called
                 `resources-demands models', where users are only
                 considered as resources consumers. Some authors build
                 more sophisticated models --- concerning user
                 psychological features. The paper discusses some of the
                 users' models and their applicability in modelling and
                 design of operating systems for computers. Some
                 examples, resulting from research carried out at the
                 Technical University of Wroclaw on a complex users'
                 model and on simulation-based performance evaluation
                 of operating systems, are presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Orchard:1977:NMC,
  author =       "R. A. Orchard",
  title =        "A new methodology for computer system data gathering",
  journal =      j-SIGMETRICS,
  volume =       "6",
  number =       "4",
  pages =        "27--41",
  month =        "Fall",
  year =         "1977",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041759.1041764",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:26 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Many computer system monitoring, data gathering, and
                 reduction efforts ignore unbiased sampling techniques.
                 The approaches generally taken are expensive and can
                 make no scientifically based statement about the
                 accuracy of the data gathered or consequent data
                 reduction. The approach outlined in this paper attempts
                 to correct these inadequacies by using the theory of
                 random sampling. Several new techniques are introduced
                 for obtaining optimal error bounds for estimates of
                 computer system quantities obtained from random
                 samples. A point of view is taken (boolean variable
                 random sampling) which makes it unnecessary to have any
                 a priori knowledge of the population parameters of the
                 phenomena being sampled. It is expected that the
                 techniques introduced will significantly reduce
                 monitoring overhead for computer systems while
                 increasing the quality of the data gathered.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "boolean random sampling; computer system monitoring;
                 data gathering",
}
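
%%% The boolean-variable random sampling described in Orchard:1977:NMC
%%% above can be illustrated with a short sketch (Python; all names
%%% and the toy workload are this file's own illustration, not the
%%% paper's): the fraction of time a system is busy is estimated from
%%% boolean probes at uniformly random instants, and a
%%% distribution-free Hoeffding bound supplies an error bound without
%%% any a priori knowledge of the population being sampled.
%%%
%%%     import math
%%%     import random
%%%
%%%     def estimate_busy_fraction(is_busy, horizon, n_samples, alpha=0.05):
%%%         """Estimate the fraction of [0, horizon) during which
%%%         is_busy(t) is true, from n_samples uniformly random probe
%%%         times, with a distribution-free (Hoeffding) error bound."""
%%%         hits = sum(is_busy(random.uniform(0.0, horizon))
%%%                    for _ in range(n_samples))
%%%         p_hat = hits / n_samples
%%%         # Hoeffding: P(|p_hat - p| >= eps) <= 2 exp(-2 n eps^2)
%%%         eps = math.sqrt(math.log(2.0 / alpha) / (2.0 * n_samples))
%%%         return p_hat, eps
%%%
%%%     # Toy workload: the CPU is busy on [k, k + 0.7) for each integer k.
%%%     busy = lambda t: (t % 1.0) < 0.7
%%%     p, eps = estimate_busy_fraction(busy, 1000.0, 10000)
%%%     print(f"busy fraction ~ {p:.3f} +/- {eps:.3f} (95% confidence)")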

@Article{Underwood:1978:HPE,
  author =       "Mark A. Underwood",
  title =        "Human performance evaluation in the use of federal
                 computer systems: recommendations",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "1--2",
  pages =        "6--14",
  month =        "Spring-Summer",
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041766.1041767",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "There has been increased awareness in recent years of
                 the high cost of non-hardware items in the Federal ADP
                 budget in contrast with decreasing costs for much of
                 the hardware. More attention is being given to software
                 development costs, systems design practices, automatic
                 program testing, and the like. Effectiveness and
                 life-cycle cost analyses for particular commercial
                 and military systems now take such factors into
                 consideration as part of the planning process. It is
                 suggested that not enough
                 attention has been given to measurement of human
                 performance variables as part of the systems
                 procurement and systems evaluation phases of Federal
                 ADP programs. Recommendations are made for the
                 incorporation of such measures along with conventional
                 hardware/software performance measurement.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "computer performance; federal systems evaluations;
                 human performance measurements; psychology of computer
                 systems usage",
}

@Article{Jain:1978:GSA,
  author =       "Aridaman K. Jain",
  title =        "A guideline to statistical approaches in computer
                 performance evaluation studies",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "1--2",
  pages =        "18--32",
  month =        "Spring-Summer",
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041766.1041768",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Anonymous:1978:PSQ,
  author =       "Anonymous",
  title =        "{Proceedings of the Software Quality and Assurance
                 Workshop}",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "1--2",
  pages =        "32--32",
  month =        "Spring-Summer",
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041766.1041769",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Honig:1978:DPA,
  author =       "Howard P. Honig",
  title =        "Data path analysis: analyzing large {I/O}
                 environments",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "1--2",
  pages =        "34--37",
  month =        "Spring-Summer",
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041766.1041770",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "As data centers grow in complexity and size, vast
                 amounts of data (I/O) are transferred between
                 peripherals and CPU's. Data Path Analysis (DPA) is a
                 technique developed to report the utilization of CPU's,
                 channels, control units, and disks during data
                 transfer. Simply put, the technique analyzes data
                 paths.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sauer:1978:SRP,
  author =       "C. H. Sauer and E. A. MacNair",
  title =        "Simultaneous resource possession in queueing models of
                 computers",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "1--2",
  pages =        "41--52",
  month =        "Spring-Summer",
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041766.1041771",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Neglect of simultaneous resource possession is a
                 significant problem with queueing network models of
                 computers. This is illustrated by examples of memory
                 contention and channel contention with position sensing
                 I/O devices. A class of extended queueing networks is
                 defined to allow representation of simultaneous
                 resource possession. Extended queueing network models
                 of memory contention and channel contention are given.
                 Solution techniques and numerical results for these
                 models are discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "channel contention; hierarchical decomposition; memory
                 contention; performance evaluation; queueing networks;
                 regenerative simulation; response time",
}
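
%%% The hierarchical decomposition named in the keywords of
%%% Sauer:1978:SRP above can be sketched in a few lines (Python; the
%%% service demands, partition count, and arrival rate are
%%% illustrative, and this is one standard flow-equivalent-server
%%% treatment of memory held simultaneously with CPU and disk, not
%%% the paper's exact extended-network solution): exact mean-value
%%% analysis gives the inner CPU/disk network's throughput X(n) with
%%% n memory-resident jobs, and memory is then modelled as a
%%% birth--death queue whose completion rate in state n is
%%% X(min(n, M)).
%%%
%%%     def mva_throughput(demands, n_jobs):
%%%         """Exact single-class MVA for a closed network of queueing
%%%         centers with the given service demands; returns the
%%%         throughputs X(0..n_jobs)."""
%%%         q = [0.0] * len(demands)      # mean queue lengths at n-1 jobs
%%%         X = [0.0]
%%%         for n in range(1, n_jobs + 1):
%%%             r = [d * (1.0 + qk) for d, qk in zip(demands, q)]
%%%             x = n / sum(r)            # throughput with n jobs
%%%             q = [x * rk for rk in r]  # Little's law per center
%%%             X.append(x)
%%%         return X
%%%
%%%     def memory_queue_mean(lmbda, X, M, n_max=400):
%%%         """Mean population of a birth--death memory queue: Poisson
%%%         arrivals at rate lmbda, completion rate X[min(n, M)] when
%%%         n jobs are present, M memory partitions (states truncated
%%%         at n_max; requires lmbda < X[M])."""
%%%         probs, p = [1.0], 1.0
%%%         for n in range(1, n_max + 1):
%%%             p *= lmbda / X[min(n, M)]
%%%             probs.append(p)
%%%         norm = sum(probs)
%%%         return sum(n * pn for n, pn in enumerate(probs)) / norm
%%%
%%%     # Inner network: CPU demand 0.05 s, disk demand 0.08 s per job.
%%%     X = mva_throughput([0.05, 0.08], n_jobs=4)    # M = 4 partitions
%%%     N = memory_queue_mean(10.0, X, M=4)
%%%     print(f"mean jobs ~ {N:.2f}; mean response ~ {N / 10.0:.3f} s")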

@Article{Pfau:1978:AQA,
  author =       "Pamela R. Pfau",
  title =        "Applied quality assurance methodology",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "1--8",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811092",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "What is the charter of a Quality Assurance (Q.A.)
                 department? What are the activities? How are they
                 undertaken? What is the impact of Quality Assurance
                 upon a software product? The structure and operating
                 philosophy of the department are explained in this
                 report as is the definition of the work cycle as
                 applied to a new release of a software product.
                 Comments are made about the interaction between
                 departments: product development, product maintenance,
                 publications, education, field support, product
                 management, marketing, product distribution and quality
                 assurance. While this is a description of the
                 activities of a company involved in developing and
                 marketing software products, the concepts apply to
                 techniques and practices which would also be beneficial
                 to any data processing department that develops
                 in-house application software.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bersoff:1978:SCM,
  author =       "Edward H. Bersoff and Vilas D. Henderson and Stan G.
                 Siegel",
  title =        "Software Configuration Management",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "9--17",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811093",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper is about discipline. It is about discipline
                 that managers should apply to software development. Why
                 is such discipline needed? Quite simply because the
                 software industry has traditionally behaved in an
                 undisciplined manner --- doing its own thing. The
                 products that the industry has turned out have
                 typically: contained other than what was expected
                 (usually less, rather than more); been delivered much
                 later than scheduled; cost more than anticipated; and
                 been poorly documented. If you have been involved in
                 any of the situations quoted above, then this paper
                 may be of some help. In short, if you are now, or
                 intend to
                 be, a software seller or buyer, then you should benefit
                 from an understanding of Software Configuration
                 Management. Lest you think that you are not now, or
                 ever will be, a software seller or buyer --- keep in
                 mind that the recent technology explosion in electronic
                 component miniaturization has placed the era of
                 personalized computing at hand. In that context, nearly
                 everyone may be considered a potential seller or buyer
                 of software. This paper is about the discipline called
                 Software Configuration Management (SCM). The objective
                 of SCM is to assist the software seller in achieving
                 product integrity and to assist the software buyer in
                 obtaining a product that has integrity.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Glass:1978:CFL,
  author =       "Robert L. Glass",
  title =        "Computing failure: a learning experience",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "18--19",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811094",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computing people can learn from failure as well as
                 success. Most professional papers deal only with the
                 latter \ldots{} yet it is well known that some of our
                 most lasting learning experiences are based on failure.
                 This paper is a lighthearted, anecdotal discussion of a
                 computing failure, with an underlying message that
                 sharing the sometimes embarrassing truths about What
                 Goes Wrong In Our Field is at least as illuminating as
                 more serious discussions about Things That Look
                 Promising. There are some necessary defense mechanisms
                 to be dealt with in discussing failure. People who have
                 failed in general do not want the world to know about
                 it. Perhaps even more so, companies which have failed
                 also do not want the world to know about it. As a
                 result, the content of this paper is fictionalized to
                 some extent. That is, company names and people names
                 are creations of the author, and there are
                 corresponding distortions in some story details.
                 However, the computing meat of the paper, the basis for
                 the failure learning experience, is untouched.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Woodmancy:1978:SQI,
  author =       "Donald A. Woodmancy",
  title =        "A Software Quality Improvement Program",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "20--26",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811095",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In late 1976, the NCR Corporation undertook a large
                 scale Quality Improvement Program for a major set of
                 systems software. That software set included some 103
                 separate products totaling 1.3 million source lines. It
                 included several operating systems, several compilers,
                 peripheral software, data utilities and
                 telecommunications handlers. This paper will describe
                 that effort and its results. The research and planning
                 that were done to define the program will be described.
                 The means by which the program was implemented will be
                 discussed in detail. Finally, some results of the
                 program will be identified.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Fujii:1978:CSA,
  author =       "Marilyn S. Fujii",
  title =        "A comparison of software assurance methods",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "27--32",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811096",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Several methods are currently employed by software
                 developers to improve software quality. This paper
                 explores the application of three of these methods:
                 quality assurance, acceptance testing, and independent
                 verification and validation. At first glance these
                 methods appear to overlap, but a closer evaluation
                 reveals that each has a distinct objective and an
                 established set of procedures. The purpose of this
                 paper is to clarify the role of each of these methods
                 by examining their scope, organization, and
                 implementation in the software development process.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sukert:1978:EMA,
  author =       "Alan N. Sukert and Amrit L. Goel",
  title =        "Error modelling applications in software quality
                 assurance",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "33--38",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811097",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents the results of a two-phased
                 experiment conducted by Rome Air Development Center and
                 Syracuse University to demonstrate the potential
                 applicability of software error prediction models in
                 performing formalized qualification testing of a
                 software package. First, decisions based upon the
                 predictions of three software error prediction models
                 will be compared with actual program decisions for a
                 large command and control software development project.
                 Classical and Bayesian demonstration tests are used to
                 make accept/reject decisions about the software system.
                 Finally, the results of the two phases will be compared
                 and some conclusions drawn as to the potential use of
                 these predictive techniques to software quality
                 assurance.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Duran:1978:TMP,
  author =       "Joe W. Duran and John J. Wiorkowski",
  title =        "Toward models for probabilistic program correctness",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "39--44",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811098",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Program testing remains the major way in which program
                 designers convince themselves of the validity of their
                 programs. Software reliability measures based on
                 hardware reliability concepts have been proposed, but
                 adequate models of software reliability have not yet
                 been developed. Investigators have recently studied
                 formal program testing concepts, with promising
                 results, but have not seriously considered quantitative
                 measures of the ``degree of correctness'' of a program.
                 We present models for determining, via testing, such
                 probabilistic measures of program correctness as the
                 probability that a program will run correctly on
                 randomly chosen input data, confidence intervals on the
                 number of errors remaining in a program, and the
                 probability that the program has been completely
                 tested. We also introduce a procedure for enhancing
                 correctness estimates by quantifying the error reducing
                 performance of the methods used to develop and debug a
                 program.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
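
%%% One of the probabilistic measures mentioned in Duran:1978:TMP
%%% above -- the probability that a program runs correctly on randomly
%%% chosen inputs -- admits a standard confidence-bound calculation
%%% that can be sketched as follows (Python; the insertion sort, the
%%% input distribution, and all names are illustrative, not the
%%% paper's models): if n independent random tests all pass, then any
%%% per-input failure probability theta with (1 - theta)^n >= alpha
%%% remains plausible, so 1 - alpha^(1/n) is a (1 - alpha) upper
%%% confidence bound.
%%%
%%%     import random
%%%
%%%     def failure_rate_upper_bound(n_passed, alpha=0.05):
%%%         """Upper (1 - alpha) confidence bound on the per-input
%%%         failure probability after n_passed independent random
%%%         tests with no failures."""
%%%         return 1.0 - alpha ** (1.0 / n_passed)
%%%
%%%     def my_sort(xs):                  # program under test
%%%         out = []
%%%         for x in xs:
%%%             i = len(out)
%%%             while i > 0 and out[i - 1] > x:
%%%                 i -= 1
%%%             out.insert(i, x)
%%%         return out
%%%
%%%     # Random testing against a trusted oracle.
%%%     n = 2000
%%%     cases = ([random.randrange(100) for _ in range(random.randrange(20))]
%%%              for _ in range(n))
%%%     assert all(my_sort(c) == sorted(c) for c in cases)
%%%     print(f"theta <= {failure_rate_upper_bound(n):.2e} at 95% confidence")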

@Article{Yin:1978:EUM,
  author =       "B. H. Yin and J. W. Winchester",
  title =        "The establishment and use of measures to evaluate the
                 quality of software designs",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "45--52",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811099",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "It has been recognized that success in producing
                 designs that realize reliable software, even using
                 Structured Design, is intimately dependent on the
                 experience level of the designer. The gap in this
                 methodology is the absence of easily applied
                 quantitative measures of quality that ease the
                 dependence of reliable systems on the rare availability
                 of expert designers. Several metrics have been devised
                 which, when applied to design structure charts, can
                 pinpoint sections of a design that may cause problems
                 during coding, debugging, integration, and
                 modification. These metrics can help provide an
                 independent, unbiased evaluation of design quality.
                 These metrics have been validated against program error
                 data of two recently completed software projects at
                 Hughes. The results indicate that the metrics can
                 provide a predictive measure of program errors
                 experienced during program development. Guidelines for
                 interpreting the design metric values are summarized
                 and a brief description of an interactive structure
                 chart graphics system to simplify metric value
                 calculation is presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Pierce:1978:RTT,
  author =       "Robert A. Pierce",
  title =        "A Requirements Tracing Tool",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "53--60",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800283.811100",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A software development aid termed the Requirements
                 Tracing Tool is described. Though originally designed
                 to facilitate requirements analysis and thus simplify
                 system verification and validation, it has also proven
                 useful as an aid for coping with changing software
                 requirements and estimating their consequent cost and
                 schedule impacts. This tool provides system analysts
                 with a mechanism for automated construction,
                 maintenance, and access to a requirements data base ---
                 an integrated file containing all types and levels of
                 system requirements. This tool was used during the
                 development of a large Navy undersea acoustic sensor
                 system. It is presently being used to support the
                 Cruise Missile Mission Planning Project. An outline
                 version of this tool is under development.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Davis:1978:RLP,
  author =       "Alan M. Davis and Walter J. Rataj",
  title =        "Requirements language processing for the effective
                 testing of real-time systems",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "61--66",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811101",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "GTE Laboratories is currently developing a trio of
                 software tools which automate the feature testing of
                 real-time systems by generating test plans directly
                 from requirements specifications. Use of the first of
                 these tools, the Requirements Language Processor (RLP),
                 guarantees that the requirements are complete,
                 consistent, non-ambiguous, and non-redundant. It
                 generates a model of an extended finite-state machine
                 which is used by the second tool, the Test Plan
                 Generator, to generate test plans which thoroughly test
                 the software for conformity to the requirements. These
                 test plans are supplied to the third tool, the
                 Automatic Test Executor, for actual testing. The RLP is
                 the subject of this paper. The primary goal of the RLP
                 is to provide the ability to specify the features of a
                 target real-time system in a vocabulary familiar to an
                 application-oriented individual and in a manner
                 suitable for test plan generation. The RLP produces a
                 document which can be easily understood by non-computer
                 personnel. It is expected that this document will
                 function as a key part of the ``contract'' between a
                 real-time system supplier and a customer. This document
                 must also serve as a springboard for the software
                 designers during their development of the actual
                 product. In addition to the requirements document, the
                 RLP also produces an augmented state transition table
                 which describes a finite state machine whose external
                 behavior is identical to the target real-time system as
                 defined by the specified requirements.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
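
%%% The test-plan generation step described in Davis:1978:RLP above
%%% starts from a requirements-derived finite-state machine; a toy
%%% stand-in for that step can be sketched as follows (Python; the
%%% greedy transition-coverage strategy, the telephone-like state
%%% table, and all names are this file's illustration, not GTE's
%%% tools): repeatedly search breadth-first for a shortest walk from
%%% the start state ending in a not-yet-exercised transition, until
%%% every transition is covered by some event sequence.
%%%
%%%     from collections import deque
%%%
%%%     def transition_cover(table, start):
%%%         """Given {state: {event: next_state}}, return event
%%%         sequences from `start` that together exercise every
%%%         transition."""
%%%         untested = {(s, e) for s, moves in table.items() for e in moves}
%%%         plans = []
%%%         while untested:
%%%             queue, seen, found = deque([(start, [])]), {start}, None
%%%             while queue and found is None:
%%%                 state, path = queue.popleft()
%%%                 for event, nxt in table.get(state, {}).items():
%%%                     if (state, event) in untested:
%%%                         found = path + [(state, event)]
%%%                         break
%%%                     if nxt not in seen:
%%%                         seen.add(nxt)
%%%                         queue.append((nxt, path + [(state, event)]))
%%%             if found is None:         # remaining transitions unreachable
%%%                 break
%%%             untested -= set(found)
%%%             plans.append([event for _, event in found])
%%%         return plans
%%%
%%%     table = {"idle":     {"offhook": "dialtone"},
%%%              "dialtone": {"digit": "ringing", "onhook": "idle"},
%%%              "ringing":  {"answer": "talking", "onhook": "idle"},
%%%              "talking":  {"onhook": "idle"}}
%%%     for plan in transition_cover(table, "idle"):
%%%         print(" -> ".join(plan))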

@Article{Peters:1978:RSR,
  author =       "Lawrence Peters",
  title =        "Relating software requirements and design",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "67--71",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811102",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Software development is a process which has evolved
                 into a number of phases. Although the names of the
                 phases and some of their characteristics differ from
                 contractor to contractor and customer to customer, the
                 functional similarities among sets of phases cannot be
                 ignored. The basic software development scenario
                 depicted by these phases starts with problem
                 identification and definition, requirements
                 specification, design, code, test, and installation and
                 maintenance. Although some ``smearing'' of one phase
                 activity into other(s) may occur, this represents the
                 basic flow. However, it is just that smearing which
                 occurs between requirements and design that we wish to
                 explore here. Identifying or defining problems and
                 solving problems are viewed by many to be separate,
                 distinguishable activities. They are complementary in
                 that one identifies what must be done (requirements)
                 while the other depicts how it will be done (design).
                 But software designers complain bitterly that
                 requirements are poorly defined while customers and
                 analysts often complain that the design is not
                 responsive to the problem(s) as they perceive it.
                 Somehow software designers end up discovering
                 previously unknown requirements and end up solving a
                 problem which is foreign to the customer. Is there a
                 workable mechanism to reduce this difficulty?",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Stavely:1978:DFU,
  author =       "Allan M. Stavely",
  title =        "Design feedback and its use in software design aid
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "72--78",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800283.811103",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "It is argued that software system designers would
                 benefit greatly from feedback about the consequences of
                 a proposed design if this feedback could be obtained
                 early in the development process. A taxonomy of
                 possible types of feedback and other design aids is
                 presented, and the capabilities of several existing
                 design aid systems are described relative to this
                 taxonomy.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Yoder:1978:NSC,
  author =       "Cornelia M. Yoder and Marilyn L. Schrag",
  title =        "{Nassi--Shneiderman} charts an alternative to
                 flowcharts for design",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "79--86",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811104",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In recent years structured programming has emerged as
                 an advanced programming technology. During this time,
                 many tools have been developed for facilitating the
                 programmer's use of structured programming. One of
                 these tools, the Structured Flowcharts developed by I.
                 Nassi and B. Shneiderman in 1972, is proving its value
                 in both the design phase and the coding phase of
                 program development. Several programming groups in
                 System Products Division, Endicott, New York, have used
                 the Nassi--Shneiderman charts as replacements for
                 conventional flowcharts in structuring programs. The
                 charts have been used extensively on some projects for
                 structured walk-throughs, design reviews, and
                 education. This paper describes the Nassi--Shneiderman
                 charts and provides explanations of their use in
                 programming, in development process control, in
                 walk-throughs, and in testing. It includes an analysis
                 of the value of Nassi--Shneiderman charts compared to
                 other design and documentation methods such as
                 pseudo-code, HIPO charts, prose, and flowcharts, as
                 well as the authors' experiences in using the
                 Nassi--Shneiderman charts. The paper is intended for a
                 general data processing audience and although no
                 special knowledge is required, familiarity with
                 structured programming concepts would be helpful. The
                 reader should gain insight into the use of
                 Nassi--Shneiderman charts as part of the total
                 development process.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Benson:1978:SQA,
  author =       "J. P. Benson and S. H. Saib",
  title =        "A software quality assurance experiment",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "87--91",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800283.811105",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An experiment was performed to evaluate the ability of
                 executable assertions to detect programming errors in a
                 real time program. Errors selected from the categories
                 of computational errors, data handling errors, and
                 logical errors were inserted in the program. Assertions
                 were then written which detected these errors. While
                 computational errors were easily detected, data
                 handling and logical errors were more difficult to
                 locate. New types of assertions will be required to
                 protect against these errors.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Assertions; Error categories",
}
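
%%% The executable assertions evaluated in Benson:1978:SQA above can
%%% be illustrated with a minimal sketch (Python; the telemetry
%%% routine, its ranges, and all names are invented for illustration,
%%% not taken from the experiment): run-time checks guard both the
%%% data-handling side (input validity) and the computational side
%%% (a relationship the result must satisfy).
%%%
%%%     def average_sensor_reading(samples, lo=0.0, hi=150.0):
%%%         """Average a block of telemetry samples under executable
%%%         assertions of the kind the experiment inserted to trap
%%%         seeded errors at run time."""
%%%         assert len(samples) > 0, "data handling: empty sample block"
%%%         assert all(lo <= s <= hi for s in samples), \
%%%             "data handling: sample outside physical range"
%%%         mean = sum(samples) / len(samples)
%%%         assert min(samples) <= mean <= max(samples), \
%%%             "computational: mean outside sample envelope"
%%%         return mean
%%%
%%%     print(average_sensor_reading([10.0, 12.5, 11.0]))
%%%     average_sensor_reading([10.0, 9999.0])   # raises AssertionError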

@Article{Bauer:1978:AGE,
  author =       "Jonathan Bauer and Susan Faasse and Alan Finger and
                 William Goodhue",
  title =        "The automatic generation and execution of function
                 test plans for electronic switching systems",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "92--100",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811106",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A three phase functional testing methodology is
                 described for use in the development cycle of
                 electronic switching systems. The methodology centers
                 on a directed graph model of the system and provides
                 for the checking of system requirements, the generation
                 of functional tests and the automatic execution of
                 these tests.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Martin:1978:SAT,
  author =       "K. A. Martin",
  title =        "Software acceptance testing that goes beyond the
                 book",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "101--105",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800283.811107",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The design of software acceptance tests is as
                 important to meeting contract goals as is the design of
                 algorithms. This statement is particularly significant
                 on fixed price contracts with tight schedules. An
                 extreme instance of the demand placed on acceptance
                 testing can be found in software projects wherein the
                 only rigorous testing that required the Computer
                 Program Configuration Item (CPCI) to exercise its
                 repertoire of load and store instructions was the
                 Formal Qualification Test (FQT). This paper is about
                 such a project, the lessons learned from it, and
                 provides an effective test approach for fixed price
                 contracts. A word or two about the project is
                 appropriate to establish the context that underscores
                 the impact of the above assertion. Initially, 30K
                 (core words) of 16-bit program instructions were to be
                 developed within one year using a Varian 73 computer
                 with 32K words of memory for a Command and Control
                 application under a fixed price contract. A set of a
                 priori conditions existed that tended to convey the
                 impression that the inherent risks of this endeavor
                 were reasonable. They were the ``facts'' that: Of the
                 30K (core words) to be written, 30\% of this code
                 already existed and would be used. Contractor standards
                 would be allowed for documentation with limited use of
                 Military Specifications No formal Design Reviews or
                 audits would accompany the deliverable CPCI. Existent
                 executive software would suffice. A competent and
                 enthusiastic team was committed to the effort.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Drasch:1978:ITP,
  author =       "Frederick J. Drasch and Richard A. Bowen",
  title =        "{IDBUG}: a tool for program development",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "106--110",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811108",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The construction of a reliable computer program
                 requires, in part, a means of verification of its
                 component parts prior to their integration into the
                 overall system. The verification process may consist of
                 building a test harness to exercise or exhaustively
                 test a procedure. This technique is known as dynamic
                 testing. In practice, the application of dynamic
                 testing requires the coding of a special harness for
                 each procedure. This consumes valuable programming
                 time, as much as 50\% of the total effort (FAIR78). It
                 is also restrictive because the test harness cannot be
                 easily modified to test aspects of a program which it
                 was not originally designed to test. We have built a
                 facility called IDBUG that reduces the programming
                 effort required to employ dynamic testing by automating
                 the construction of the test harness. Additionally, it
                 provides an interactive test environment which permits
                 more flexible testing. This paper describes IDBUG and
                 discusses our experience in its application to
                 maintenance tasks in a commercial environment. None
                 the ideas put forth here will be especially novel;
                 dynamic testing as a software testing tool has been in
                 use for some time. What we hope to do is illustrate the
                 beneficial aspects of a particular application of
                 dynamic testing. It is argued that testing should play
                 a more limited role in assuring the reliability of
                 software in light of techniques such as structured
                 coding, top-down design, proof of correctness, etc.
                 (McG075). While it is true that eventually the ``art of
                 computer programming'' will become the ``science of
                 producing correct programs'', we believe that more
                 emphasis must be placed on interim solutions to aid in
                 the construction of reliable software. We present IDBUG
                 as such a solution.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Stickney:1978:AGT,
  author =       "M. E. Stickney",
  title =        "An application of graph theory to software test data
                 selection",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "111--115",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811109",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Graph theory is playing an increasingly important role
                 in the design, analysis, and testing of computer
                 programs. Its importance derives from the fact that
                 flow of control and flow of data for any program can be
                 expressed in terms of directed graphs. From the graph
                 representing the flow of control, called the program
                 graph, many others can be derived that either partially
                 or completely preserve the program control structure.
                 One derived graph known as a cyclomatic tree is of
                 particular value in program testing. It is so named
                 because the number of leaves of the tree is equal to
                 the cyclomatic number of the program graph. A thorough
                 treatment of cyclomatic numbers is provided in [3]. A
                 program called the Complexity/Path Analyzer (CPA) has
                 been developed that builds and utilizes a program
                 cyclomatic tree to provide test planning information,
                 automatically place software counters called probes as
                 discussed in [9] and [10] in a program, and provide
                 selected parameters such as program length and program
                 graph cyclomatic number. The paper discusses the
                 features and derivation of cyclomatic trees as well as
                 their value and application to testing and test data
                 generation. A cyclomatic tree provides a test planner
                 with information useful for planning program tests. In
                 particular, it furnishes test data selection criteria
                 for developing tests that are minimally thorough as
                 defined by Huang in [9]. A test data selection
                 criterion will be defined as minimally thorough if any
                 complete test with respect to the criterion is at least
                 minimally thorough. The term complete is used as
                 defined by Goodenough and Gerhart in [13]. A test is
                 defined to be a non-empty sequence of test cases. Each
                 test case consists of an element selected from the
                 input domain of the program being tested. The paper
                 discusses the merits of one particular technique
                 selected to achieve a minimally thorough test data
                 selection criteria. Part of the technique is automated
                 by the CPA program.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
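
%%% The cyclomatic number central to Stickney:1978:AGT above is
%%% v(G) = E - N + 2P for a program graph with E edges, N nodes, and
%%% P connected components; a minimal sketch (Python; the edge-list
%%% encoding and the if/else-plus-loop example are illustrative, and
%%% this computes only v(G), not the paper's cyclomatic tree):
%%%
%%%     def cyclomatic_number(edges):
%%%         """v(G) = E - N + 2P for a control-flow graph given as
%%%         (src, dst) edge pairs; P counts weakly connected
%%%         components via union-find."""
%%%         nodes = {n for e in edges for n in e}
%%%         parent = {n: n for n in nodes}
%%%         def find(n):
%%%             while parent[n] != n:
%%%                 parent[n] = parent[parent[n]]
%%%                 n = parent[n]
%%%             return n
%%%         for a, b in edges:
%%%             parent[find(a)] = find(b)
%%%         P = len({find(n) for n in nodes})
%%%         return len(edges) - len(nodes) + 2 * P
%%%
%%%     # An if/else followed by a self-looping loop: v(G) = 3.
%%%     edges = [("entry", "then"), ("entry", "else"), ("then", "loop"),
%%%              ("else", "loop"), ("loop", "loop"), ("loop", "exit")]
%%%     print(cyclomatic_number(edges))   # -> 3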

@Article{Fischer:1978:SQA,
  author =       "Kurt F. Fischer",
  title =        "Software quality assurance tools: {Recent} experience
                 and future requirements",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "116--121",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811110",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The objective of software quality assurance (QA) is to
                 assure sufficient planning, reporting, and control to
                 effect the development of software products which meet
                 their contractual requirements. To implement this
                 objective, eight QA functions can be identified: (1)
                 initial quality planning; (2) development of software
                 standards and procedures; (3) development of quality
                 assurance tools; (4) conduct of audits and reviews;
                 (5) inspection and surveillance of formal tests; (6)
                 configuration verifications; (7) management of the
                 discrepancy reporting system; and (8) retention of QA
                 records. The purpose of this paper is to document
                 experiences
                 gained in the use of selected QA tools that perform
                 some of the above functions, to discuss lessons
                 learned, and to suggest future needs.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Glasser:1978:ESC,
  author =       "Alan L. Glasser",
  title =        "The evolution of a {Source Code Control System}",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "122--125",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811111",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The Source Code Control System (SCCS) is a system for
                 controlling changes to files of text (typically, the
                 source code and documentation of software systems). It
                 is an integral part of a software development and
                 maintenance system known as the Programmer's Workbench
                 (PWB). SCCS has itself undergone considerable change.
                 There have been nine major versions of SCCS. This paper
                 describes the facilities provided by SCCS, and the
                 design changes that were made to SCCS in order to
                 provide a useful and flexible environment in which to
                 conduct the programming process.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Josephs:1978:MCB,
  author =       "William H. Josephs",
  title =        "A mini-computer based library control system",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "126--132",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811112",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "One of the major problems encountered in any large
                 scale programming project is the control of the
                 software. Invariably, such large programs are divided
                 into many smaller elements since these are easier to
                 code, test and document. However, such a division adds
                 new complexity to the task of Configuration Management
                 since the many source modules, data base elements, JCL
                 (Job Control Language) and DATA files must be
                 controlled with the goal of maximizing program
                 integrity and minimizing the chances of procedural
                 errors. Furthermore, whenever any program is released
                 either for field test or for final production, an
                 entire change control procedure must be implemented in
                 order to trace, install, debug and verify fixes or
                 extensions to the original program. These maintenance
                 activities can account for up to 80 percent of the
                 entire programming cost in a large, multi-year project.
                 The library control program (SYSM) presented here was
                 developed to aid in these processes. It has facilities
                 for capturing all elements of a program (commonly
                 called baselining), editing any element or group of
                 elements that have been baselined to build an updated
                 version of the program, adding and/or deleting elements
                 of a program, and listing the current contents of a
                 given element or elements. SYSM is written mainly in
                 FORTRAN, and runs on a Hewlett--Packard HP-21MX
                 computer with two tape drives, the vendor supplied
                 RTE-II or RTE-III operating system, and at least 16K of
                 user available core. It can be used to control code
                 targeted for either the HP21MX itself, or, using the
                 optional HP/LSI-11 link program, code targeted for a
                 Digital Equipment Corp. LSI-11 system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Cavano:1978:FMS,
  author =       "Joseph P. Cavano and James A. McCall",
  title =        "A framework for the measurement of software quality",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "133--139",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811113",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Research in software metrics incorporated in a
                 framework established for software quality measurement
                 can potentially provide significant benefits to
                 software quality assurance programs. The research
                 described has been conducted by General Electric
                 Company for the Air Force Systems Command Rome Air
                  Development Center. The problems encountered in defining
                 software quality and the approach taken to establish a
                 framework for the measurement of software quality are
                 described in this paper.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Cobb:1978:MSU,
  author =       "Gary W. Cobb",
  title =        "A measurement of structure for unstructured
                 programming languages",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "140--147",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811114",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Software Science is a field of Natural Science which
                 deals with the development of measurements which reveal
                 properties of software programs. These measurements are
                  qualified by their degree of correlation with the
                  ability of human beings to construct or understand a
                  subject program. Maurice Halstead has pioneered much
                  of the theory in this field ((5) through (10)), which
                 applies statistical and psychological testing
                 techniques to the evaluation of the measurements. The
                 basic inputs to the Halstead predictors are easily
                 measured: the number of distinct operators and
                 operands, and the number of occurrences of the
                 operators and operands. Due to the statistical nature
                 of the measurements, there can be erroneous results
                 when applying them to small sample spaces. However, the
                 predictors are very adequate when applied to large
                 samples, that is, large software systems. In an
                 excellent review article by Fitzsimmons and Love (4),
                 it is pointed out that several of the estimators
                 defined by Halstead assumed that the subject programs
                 were well-structured, and inaccuracy in the predictors
                 can result if they are applied to `unpolished'
                 programs. In fact, Halstead qualified six classes of
                 impurities in code which can cause the length predictor
                 to be inaccurate. The definition of volume for
                 software, another predictor introduced in Halstead's
                 book, is related to the level of the specification of
                 the program. An algorithm which is written in assembly
                 language will have a greater volume than the same
                 algorithm written in Pascal, due to the richness of the
                 semantic constructs that are available in the
                 higher-level languages. Hence, this predictor is
                 language dependent.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bowen:1978:CAS,
  author =       "John B. Bowen",
  title =        "Are current approaches sufficient for measuring
                 software quality?",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "148--155",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811115",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Numerous software quality studies have been performed
                  over the past three years, mostly sponsored by the Rome
                 Air Development Center. It is proposed by the author
                 that more emphasis should be placed on devising and
                 validating quantitative metrics that are indicative of
                 the quality of software when it is being designed and
                  coded. Such measures could be applied effectively as
                  relative guidelines without formal validation. However,
                  for such measures to be predictive of the quality of
                 the delivered software, they must be validated with
                 actual operational error data or data gathered in a
                 simulated operational environment. This paper includes
                  a review of proposed metrics from the literature, a
                 report of a Hughes intramodule metric study, and
                 recommendations for refining proposed software quality
                 assurance criteria.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lockett:1978:UPM,
  author =       "Joann Lockett",
  title =        "Using performance metrics in system design",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "156--159",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1007775.811116",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Complexities of system design are great and often lead
                 designers to be inward looking in their analyses.
                 Knowledge from various fields can be of benefit in
                 designing systems [1]. Management accountants can
                 describe economic effects of delays in closing
                  schedules, psychologists can provide significant
                  insights into the behavioral responses of users
                  to complex command syntax, and computer performance
                  analysts can provide alternatives to describe and to
                 measure responsiveness of systems. Even in the case of
                 an innovative system design, the designer can employ
                 such approaches to identify incipient problems and
                 create alternatives with increased cost effectiveness.
                 This paper describes how performance metrics can be
                 used effectively to support system design.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Southworth:1978:RM,
  author =       "Richard N. Southworth",
  title =        "Responding to {MIL-S-52779}",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "160--164",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811117",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The art and science of computer software development
                 is still changing considerably from year to year, and
                 therefore lacks the established control mechanisms of
                 hardware production programs. Also, because most
                 software is produced in a one-time development program
                 it does not lend itself to the established discrepancy
                 detection and correction techniques used in hardware
                 production programs. Consequently, the software QA
                 program must provide the methodology to detect a
                 deficiency the first time it occurs and effect
                 corrective action. MIL-S-52779: ``Software Quality
                 Assurance Program Requirements,'' has provided a much
                 needed impetus for software development contractors to
                 develop software QA techniques. But much remains to be
                  done. As the state of the art advances, MIL-S-52779
                 should be revised accordingly. In this paper the author
                 responds to the present form of the specification,
                 suggests some revisions and additions and briefly
                 discusses a set of QA procedures that should be
                  responsive to (fully compliant with) MIL-S-52779.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Tighe:1978:VPS,
  author =       "Michael F. Tighe",
  title =        "The value of a proper software quality assurance
                 methodology",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "165--172",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800283.811118",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes the experiences of a project
                 development team during an attempt to ensure the
                 quality of a new software product. This product was
                 created by a team of software engineers at Digital
                 Equipment Corporation, a mainframe manufacturer. As a
                 result, the definition of ``to ensure the quality of a
                 software product'' meant minimizing the maintenance
                 costs of the new product. Ease of maintenance and a low
                 bug rate after release to the customer were very
                 important goals from the beginning of the project. This
                 paper compares the degree of application and resultant
                 effects of several software quality assurance
                 methodologies upon different parts of the final
                 product. Many of the product's subsystems were created
                 using all of the discussed methodologies rigorously.
                 Some subsystems were created with little or no use of
                 the methodologies. Other subsystems used a mixture. The
                 observed quality of the various subsystems when related
                 to the methodology used to create them provides
                 insights into the interactions between the
                 methodologies. These observations also supply
                 additional experience to reinforce established beliefs
                 concerning the value of quality assurance
                 methodologies.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Belford:1978:QEE,
  author =       "Peter Chase Belford and Carlo Broglio",
  title =        "A quantitative evaluation of the effectiveness of
                 quality assurance as experienced on a large-scale
                 software development effort",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "173--180",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811119",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The purpose of quality assurance on software projects
                 is to achieve high quality products on schedule, within
                 cost, and in compliance with contract requirements.
                 However, historically, the effectiveness of these
                 activities on software projects has not been
                 quantitatively demonstrable because of a lack of data
                 collected on the project combined with a lack of
                 insight into the operational reliability of the system.
                 Quality assurance is a collection of activities on a
                 contractual deliverable whose purpose is to impart a
                 degree of confidence that the deliverable will conform
                 to the customer's concept of what was procured. Under
                 these conditions, quality assurance must be performed
                 with respect to a documented baseline of the concept.
                 This baseline can address the need in the form of
                 requirement statements; the conceptual approach to be
                 followed in the form of a functional specification; or
                 the design to be implemented in the form of a design
                 specification. Further, these baselines are
                 hierarchical in the sense that when quality assurance
                 is applied to a level it is implicitly applied to all
                 lower levels; e.g., if the need is to be satisfied, the
                 conceptual approach must be satisfied. Effective
                 quality assurance programs impart a high degree of
                 confidence to the customer without significant impacts
                  on schedule or cost.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kacik:1978:ESQ,
  author =       "Paul J. Kacik",
  title =        "An example of software quality assurance techniques
                 used in a successful large scale software development",
  journal =      j-SIGMETRICS,
  volume =       "7",
  number =       "3--4",
  pages =        "181--186",
  month =        nov,
  year =         "1978",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/953579.811120",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:52:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Development of the software package for the Combat
                 Grande Air Defense System was considered by the Hughes
                 Aircraft Company to be highly successful in that a
                 reliable system was produced that met customer
                 requirements and was completed within time and budget
                 allocations --- a feat not often attained in large
                 scale software developments. Much of the success can be
                 attributed to the software quality assurance (QA)
                 techniques used. Some of these QA techniques are listed
                 in Table 1 along with the phases in which they were
                 used. This paper describes these QA techniques in some
                 detail, as well as those aspects of the system and
                 software development program that permitted these
                 techniques to be used effectively. Background
                 information is presented first which describes the
                 system, software, organization and software
                 configuration management. This is followed by a
                 description of the three major phases of software
                 development. The overall results are then presented,
                 followed by recommended improvements and conclusions.
                 Many of the QA techniques listed in Table 1 were used
                 in several phases of software development. However, a
                 particular technique is discussed only in the phase in
                 which it was most extensively used.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kreutzer:1979:CSM,
  author =       "Wolfgang Kreutzer",
  title =        "Computer system modelling and simulation",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "9--35",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041854",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "To evaluate the suitability and limitations of
                 software for computer systems modelling, a basic
                 comprehension of the structure of such tools must be
                 provided. A brief discussion of conceptual requirements
                 for the description of discrete models, and computer
                 system models in particular, is followed by a survey of
                 commercially available computer simulation packages.
                 Special and general purpose discrete event simulation
                 and general purpose programming languages are also
                 analysed for their suitability for this class of
                 applications. The survey closes with some
                 recommendations and guidelines for selection and
                 application of computer system simulation tools. To aid
                 the analyst contemplating a computer system modelling
                 project, a brief list of relevant addresses and
                 annotated references is also included.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Turner:1979:ISM,
  author =       "Rollins Turner",
  title =        "An investigation of several mathematical models of
                 queueing systems",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "36--44",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041855",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A number of simple mathematical models were used to
                 predict average response time of a timesharing system.
                 The target system was a very simple trace driven
                 simulation model, but the workloads were trace files
                 obtained from a real system in normal operation. As
                 such, the workloads were characterized by very high
                 coefficients of variation in resource demands and think
                 times. Mathematical models of the system included
                  independent arrival models (M/M/1 and M/G/1), closed
                  network models admitting product form solutions, and a
                 more general Markov model. Only the final model
                 produced reasonable accuracy. A number of experiments
                 were performed, in an effort to determine what
                 properties of the system being modeled were responsible
                 for the failure of all the simple mathematical models.
                 The large variance in CPU time and the fact that the
                 system was a closed network were found to be critical
                 factors, and appeared to be the major causes for
                 failure of models that do not take them into account.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
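
%%% A minimal worked note, added for orientation (not from the paper):
%%% the simplest of the independent-arrival models named above is the
%%% open M/M/1 queue, whose mean response time under Poisson arrivals
%%% at rate $\lambda$ and exponential service at rate $\mu$ (with
%%% $\lambda < \mu$) is
%%%
%%%     $R = \frac{1}{\mu - \lambda}$.
%%%
%%% Such a model ignores both the closed-network structure and the very
%%% high variance of the traced demands, the two factors the abstract
%%% identifies as the major causes of inaccuracy.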

@Article{Sauer:1979:CIQ,
  author =       "Charles H. Sauer",
  title =        "Confidence intervals for queueing simulations of
                 computer systems",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "45--55",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041856",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Simulation models of computer systems may be
                 formulated as queueing networks. Several methods for
                 confidence interval estimation for queueing simulations
                 are discussed. Empirical studies of these methods are
                 presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
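
%%% For orientation, one standard method in this family is batch means
%%% (an illustration, not necessarily the method the paper recommends):
%%% split a run into $k$ batches with means $Y_1, \ldots, Y_k$ and form
%%% the interval
%%%
%%%     $\bar{Y} \pm t_{k-1, 1-\alpha/2} \, \sqrt{S^2 / k}$,
%%%
%%% where $\bar{Y}$ and $S^2$ are the sample mean and variance of the
%%% batch means; the batches must be long enough that their means are
%%% approximately independent and normally distributed.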

@Article{Kleijnen:1979:NCS,
  author =       "Jack P. C. Kleijnen",
  title =        "A note on computer system data gathering",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "56--56",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041857",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Recently Orchard (1977) proposed a statistical
                 technique for data collection in computer systems. A
                 main idea was the use of random sampling, as opposed to
                 traditional fixed periodic sampling. He further
                 proceeded to derive confidence intervals for the
                 resulting estimator. He also proposed the use of binary
                 (Boolean) variables, e.g., $ q_{it} = 1 $ (or $0$) if
                  at sampling time $t$ the $i$th `slot' of a queue is
                  occupied (or empty, respectively).",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
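
%%% A minimal worked reading of the binary variables above (this
%%% estimator form is an illustration, not quoted from Orchard): with
%%% $T$ random sampling instants, mean queue occupancy can be
%%% estimated as
%%%
%%%     $\hat{L} = \frac{1}{T} \sum_{t=1}^{T} \sum_{i} q_{it}$,
%%%
%%% the average number of occupied slots per sampling instant; random
%%% rather than fixed-period sampling keeps the samples from
%%% synchronizing with periodic system activity.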

@Article{Rajaraman:1979:PPV,
  author =       "M. K. Rajaraman",
  title =        "Performance prediction of a virtual machine",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "57--62",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041858",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Modeling and simulation of computer systems have two
                 main objectives. First, to evaluate the performance of
                  a given configuration of a machine, and second, to
                 derive a mechanism for prediction of performance when
                 configuration parameters change. This paper addresses
                 the second issue and reports the result of a recent
                 investigation of a Virtual Memory Computer. The results
                 indicate which variables or combination of variables
                 have significant effect on the performance and which do
                 not.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Jain:1979:GSA,
  author =       "Aridaman K. Jain",
  title =        "A guideline to statistical approaches in computer
                 performance evaluation studies",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "63--77",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041859",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Schwartz:1979:DCC,
  author =       "E. Schwartz",
  title =        "Development of credible computer system simulation
                 models",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "78--95",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041860",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The problems encountered during a simulation effort
                 are influenced by the objectives of the simulation.
                 Verification and validation of the simulation model are
                 two such problems which affect the credibility (and
                 usability) of the model. A simulation methodology for
                 Program Design Analysis is described. The goal of this
                 simulation application is to test a design before it is
                 implemented. Techniques are described which enhance the
                 credibility of simulation models. The relationship
                 between Program Design Analysis and the reliability of
                 the system being developed is explored.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Clark:1979:CPE,
  author =       "Jon D. Clark and Thomas J. Reynolds and Michael J.
                 Intille",
  title =        "Computer performance evaluation: an empirical
                 approach",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "97--101",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041861",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computer performance evaluation can be delineated into
                 the areas of selection, projection and monitoring. The
                 tuning of existing systems for efficient performance
                 may be viewed as a special case of the projection
                 activity involving modeling, statistics collection and
                  analysis. Most tools available today are expensive to
                 use and overly complicated. This paper presents the
                 comparison of two, relatively simple and
                 cost-effective, statistical techniques for performance
                 evaluation: regression and canonical analysis. In
                 addition, the results of the suggested and implemented
                  computer configuration modification are reported.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "canonical analysis; computer performance evaluation;
                 multi-processor; regression analysis",
}

@Article{Willis:1979:TSW,
  author =       "Ron Willis",
  title =        "Techniques in simulation which enhance software
                 reliability",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "1--2",
  pages =        "102--115",
  month =        "Spring-Summer",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041853.1041862",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:30 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A simplified simulation study of an actual software
                 development effort is presented. A model is developed
                 and exercised through various stages of modifications
                  to an originally unreliable software design until a
                  viable software design results. Techniques in model
                 development, simulation, analysis, and language
                 capability which lead to enhanced software reliability
                  are discussed. Unique aspects of the approach presented
                  are contrasted with simulation methods which lack this
                 capability.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Blake:1979:TSM,
  author =       "Russ Blake",
  title =        "{Tailor}: a simple model that works",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "1--11",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805444",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Tailor is an atomic model of the Tandem/16
                 multiple-computer system. Atomic modeling is based on
                 operational analysis and general considerations from
                 queueing theory. Measurements of system atoms define
                 the underlying components of processor usage. The
                 workload is described to the model through a separate
                 set of measurable parameters that comprise the workload
                 atoms. Simple formulae from operational analysis are
                 then applied to predict the amount of equipment
                 necessary to support the projected application.
                 Tailor's accuracy was tested under two very different
                 workloads. For both a large backend database
                 application and a program development system, Tailor
                 was able to predict the equipment needed to handle the
                 workloads to within 5 percent.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Strecker:1979:ACP,
  author =       "William D. Strecker",
  title =        "An analysis of central processor-input-output
                 processor contention",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "27--40",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805445",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Most computer systems have separate central (CPU) and
                 input-output (IOP) processors to permit simultaneous
                 computation and input-output (I/O). It is conventional
                 in such systems to avoid any loss of I/O data by
                 granting the IOP priority over the CPU for memory
                 service. Although this priority discipline is simple to
                  implement, it may result in a maximum degradation of CPU
                  performance. In this discussion an analysis of the IOP
                  priority discipline is given, together with an analysis
                  of other priority disciplines which require the
                  buffering of IOP requests; results are given showing
                 that only a small amount of buffering is required to
                 produce a noticeable improvement in CPU performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Contention; CPU; I/O interference; Input-output;
                 Memory system; Priority discipline; Processor",
}

@Article{Wiecek:1979:PST,
  author =       "Cheryl A. Wiecek and Simon C. {Steely, Jr.}",
  title =        "Performance simulation as a tool in central processing
                 unit design",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "41--47",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805446",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Performance analysis has always been considered
                 important in computer design work. The area of central
                 processing unit (CPU) design is no exception, where the
                 successful development of performance evaluation tools
                 provides valuable information in the analysis of design
                 tradeoffs. Increasing integration of hardware is
                 producing more complicated processor modules which add
                 to the number of alternatives and decisions to be made
                 in the design process. It is important that these
                 modules work together as a balanced unit with no hidden
                 bottlenecks. This paper describes a project to develop
                 performance simulation as an analysis tool in CPU
                  design. The methodology is first detailed as a three-part
                  process in which a performance simulation program
                 is realized that executes an instruction trace using
                 command file directions. Discussion follows on the
                 software implemented, applications of this tool in CPU
                 design, and future goals.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bennett:1979:SDS,
  author =       "David A. Bennett and Christopher A. Landauer",
  title =        "Simulation of a distributed system for performance
                 modelling",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "49--56",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805447",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A distributed system of cooperating minicomputers is
                 simulated by AIMER (Automatic Integration of Multiple
                 Element Radars) to model and analyze the behavior of a
                 radar tracking system. Simulation is applied in the
                 AIMER project in an attempt to model a network of
                 minicomputers to discover a maximally flexible network
                 architecture. Because building the tracking system out
                 of real hardware would not result in a flexible enough
                 testbed system, the proposed configuration is
                 represented by a software emulation. The instruction
                 sets of the individual processors are emulated in order
                 to allow separation of the measurement facilities from
                 the execution of the system. The emulation is supported
                  by a Nanodata QM-1 micro- and nano-programmable host.
                 Extensive performance monitoring hooks have been built
                 into the emulation system which allow small performance
                 perturbations to become visible. The tracking network
                 is controlled by a combination firmware operating
                 system and a special emulated virtual control machine.
                 The tracking algorithms run on virtual machines whose
                 instruction sets and computational throughput can be
                 parameterized when the model is generated, or
                 dynamically by an operator during a run. The radar and
                 ground truth environments for the tracking system are
                 simulated with logic resident in one of the emulated
                 machines, allowing these functions to be monitored as
                 accurately as the tracking algorithms. The use of this
                 simulation technique has resulted in an extremely
                 flexible testbed for the development of distributed
                 radar tracking system models. The testbed itself can be
                 quickly tailored to other application problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lazowska:1979:BTA,
  author =       "Edward D. Lazowska",
  title =        "The benchmarking, tuning and analytic modeling of
                 {VAX\slash VMS}",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "57--64",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805448",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes a recent experience in
                 benchmarking, tuning and modelling Digital Equipment
                 Corporation's VMS executive running on their VAX-11/780
                 computer. Although we emphasize modelling here, the
                 three aspects are closely interrelated.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Marshall:1979:AMW,
  author =       "William T. Marshall and C. Thomas Nute",
  title =        "Analytic modelling of ``working set like'' replacement
                 algorithms",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "65--72",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805449",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Although a large amount of theoretical work has been
                 performed in the analysis of the pure working set
                 replacement algorithm, little has been done applying
                 these results to the approximations that have been
                 implemented. This paper presents a general technique
                 for the analysis of these implementations by analytic
                 methods. Extensive simulations are reported which
                 validate the analytic model and show significant
                 simplifications that can be made with little loss of
                 accuracy. The problem of choosing memory policy
                 parameter values is examined and related in a simple
                 way to the choice of a working set window size.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
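
%%% Background definition (Denning's working set, assumed rather than
%%% restated in the abstract): $W(t, \tau)$ is the set of distinct
%%% pages referenced in the window $(t - \tau, t]$, and the pure
%%% policy keeps exactly those pages resident. Implemented
%%% approximations typically scan reference bits periodically instead
%%% of tracking every reference; the paper relates their tuning
%%% parameters back to the choice of the window size $\tau$.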

@Article{Briggs:1979:EBM,
  author =       "Fay{\'e} A. Briggs",
  title =        "Effects of buffered memory requests in multiprocessor
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "73--81",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805450",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A simulation model is developed and used to study the
                 effect of buffering of memory requests on the
                 performance of multiprocessor systems. A multiprocessor
                 system is generalized as a parallel-pipelined processor
                 of order $ (s, p) $, which consists of $p$ parallel
                  processors, each of which is a pipelined processor with
                  $s$ degrees of multiprogramming, so there can be up to
                  $ s \cdot p $ memory requests in each instruction cycle. The
                 memory, which consists of $ N ( = 2^n)$ identical
                 memory modules, is organized such that there are $ \ell
                  ( = 2^i)$ lines and $ m ( = 2^{n - i})$ identical memory
                  modules per line, where each module is characterized by
                 the address cycle (address hold time) and memory cycle
                  of $a$ and $c$ time units, respectively. Too large an $
                 \ell $ is undesirable in a multiprocessor system
                 because of the cost of the processor-memory
                 interconnection network. Hence, we will show how
                 effective buffering can be used to reduce the system
                 cost while effectively maintaining a high level of
                 performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
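
%%% Worked numbers for the memory organization above (illustrative
%%% values, not taken from the paper): with $n = 4$ and $i = 2$,
%%%
%%%     $N = 2^4 = 16$ modules, $\ell = 2^2 = 4$ lines, and
%%%     $m = 2^{4-2} = 4$ modules per line,
%%%
%%% while an order-$(s, p) = (2, 4)$ processor can issue up to
%%% $s \cdot p = 8$ memory requests per instruction cycle; buffering
%%% lets the smaller $\ell$ (a cheaper interconnection network) absorb
%%% such bursts.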

@Article{Raffi:1979:ECB,
  author =       "Abbas Raffi",
  title =        "Effects of channel blocking on the performance of
                 shared disk pack in a multi-computer system",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "83--87",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805451",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In a multi-computer environment where several
                 computers share packs of disk drives, the architecture
                 of the disk controller can have significant effect on
                 the throughput of the disk pack. In a simple
                 configuration a controller can allow access to only one
                 disk in the pack at a time, and effectively block other
                 channels from accessing other disks in the pack. A
                 desirable alternative is to be able to access different
                 disks of the same pack simultaneously from different
                  channels. Motivated by the presence of mixed hardware
                 in an installation to support both configurations, an
                 attempt is made to model each system and produce
                 analytical and simulation results to compare their
                 relative performances. It is predicted that under the
                 prevalent conditions in the installation, a complete
                 switchover to either system should not give rise to
                 significant performance change.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Zahorjan:1979:ESM,
  author =       "John Zahorjan",
  title =        "An exact solution method for the general class of
                 closed separable queueing networks",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "107--112",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805452",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper we present a convolution algorithm for
                 the full class of closed, separable queueing networks.
                 In particular, the algorithm represents an alternative
                 method to those already known for the solution of
                 networks with class changes, and is the first efficient
                 algorithm to deal with Lam-type networks [11]. As an
                 application of the algorithm, we study a simple
                 queueing network with disk I/O devices connected to a
                 single CPU through a single channel. The algorithm is
                 then used to develop a simple, accurate approximation
                 for the blocking of disk devices that takes place when
                 a customer using a disk is waiting for or in service at
                 the channel.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
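
%%% For orientation (classical background, not the paper's generalized
%%% algorithm): for a closed product-form network with load-independent
%%% stations $1, \ldots, K$ and population $n$, Buzen's convolution
%%% recursion computes the normalizing constant via
%%%
%%%     $G_k(n) = G_{k-1}(n) + \rho_k \, G_k(n-1)$,
%%%     $G_k(0) = 1$, $\quad G_0(n) = 0$ for $n \ge 1$,
%%%
%%% where $\rho_k$ is the relative utilization of station $k$; the
%%% algorithm in the paper extends this style of computation to
%%% networks with class changes and to Lam-type networks.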

@Article{Kienzle:1979:SAQ,
  author =       "Martin G. Kienzle and K. C. Sevcik",
  title =        "Survey of analytic queueing network models of computer
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "113--129",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805453",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A number of case studies involving the use of queueing
                 network models to investigate actual computer systems
                 are surveyed. After suggesting a framework by which
                 case studies can be classified, we contrast various
                 parameter estimation methods for specifying model
                 parameters based on measurement data. A tabular summary
                 indicates the relationships among nineteen case
                 studies.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Landry:1979:SEP,
  author =       "Steve P. Landry and Bruce D. Shriver",
  title =        "A simulation environment for performing dataflow
                 research",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "131--139",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805454",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Dataflow languages and processors are currently being
                 extensively studied because of their respective ability
                 to specify and execute programs which exhibit a high
                 degree of parallel and/or asynchronous activity [12,
                 7]. This paper describes a comprehensive simulation
                 environment that allows for the execution and
                 monitoring of dataflow programs. One overall objective
                 of this facility was to meet the needs of researchers
                 in such diverse areas as computer architecture,
                 algorithm analysis, and language design and
                 implementation. Another objective was to accommodate
                 the semantics of several of the contending abstract
                 dataflow models [2, 4]. Additionally, it was desired to
                 enhance the abstract dataflow models which the
                 simulator would support. These objectives, combined
                 with the desired debugging and metering requirements,
                 directed the design of the overall system. A brief
                 introduction to dataflow and its related terminology is
                 given to assist the reader. A companion paper [6]
                 describes an augmentation to the basic simulation
                 facility presented here that allows for the execution
                 of dataflow programs on processors having finite
                 resources.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Langan:1979:SED,
  author =       "David D. Langan and Bruce D. Shriver",
  title =        "Simulated execution of dataflow programs on processors
                 having finite resources",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "141--149",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805455",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Dataflow languages and processors are currently being
                 extensively studied because they provide for the
                 specification and realization of processes exhibiting a
                 high degree of parallel and/or asynchronous activity
                 [12, 8]. Several researchers have developed simulators
                 for specific candidate dataflow architectures in which
                 there are essentially an infinite number of resources
                  available to the host machine [9, 1]. This is done to
                 study the degree of parallelism which is achievable
                  with a given version of an algorithm. However, it is an
                  equally important (and neglected) problem to study the
                 behavior of programs executing in candidate computer
                 systems having a finite amount of resources. This paper
                 presents results which have been obtained from such
                 modeling. It is shown that in such a system certain
                 ``critical nodes'' must be given priority of execution
                 when competing with other nodes for the same resources
                 in order to achieve the maximum system throughput. It
                 is suggested that the abstract dataflow model be
                 modified to accommodate such situations. Various design
                 trade-offs associated with the implementation of the
                 simulator are discussed along with a description of
                 available features. A companion paper [6] describes the
                 general dataflow simulation facility which provided the
                 basis of this work.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Unger:1979:OSI,
  author =       "Brian W. Unger and James R. Parker",
  title =        "An operating system implementation and simulation
                 language {(OASIS)}",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "151--161",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805456",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An approach to the implementation and simulation of
                 system software for multicomputer architectures is
                 described. OASIS, a variant of the SIMULA 67 language,
                 provides tools for both hardware modelling and system
                 software development. The latter includes an extensible
                 module type with flexible intermodule access control.
                 Hardware is characterized at the processor/memory level
                 so that system software resource control and allocation
                 policies can be implemented at a functional level.
                 Concurrent module execution by multiple processors,
                 with or without shared memory, can be simulated
                 directly. The OASIS modules in such a simulation can
                 closely parallel the structure of actual system
                 software. Thus, once a design is shown viable by
                 simulation, the implementation of actual software can
                 be a simple translation of OASIS modules. A brief
                 overview of OASIS features is presented followed by a
                 simple example.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sanguinetti:1979:TIS,
  author =       "John Sanguinetti",
  title =        "A technique for integrating simulation and system
                 design",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "163--172",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805457",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A technique for simulating incomplete systems is given
                 which allows performance prediction during system
                 design. This technique, called integrated simulation,
                 allows the system design to itself be a simulation
                 model, thus avoiding the overhead of maintaining a
                 separate, valid simulation model for the system. The
                 paper presents integrated simulation in the framework
                 of a system modeling language called the Program
                 Process Modeling Language, PPML. This language provides
                 a means for describing systems of concurrent processes
                 in both abstract and explicit terms, thus lending
                 itself well to a top-down design method. In the design
                 process, any PPML representation of the system can be
                 simulated directly, from the most abstract design to
                 the completely elaborated system. Simulation of the
                 completely elaborated system is, in fact, simply the
                 system in execution. The paper defines PPML and
                 describes the techniques required to simulate PPML
                 systems given various underlying machines. It concludes
                 with a discussion of the limitations of the integrated
                 simulation method.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Razouk:1979:EMS,
  author =       "Rami R. Razouk and Mary Vernon and Gerald Estrin",
  title =        "Evaluation methods in {SARA} --- the graph model
                 simulator",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "189--206",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805458",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The supported methodology evolving in the SARA (System
                 ARchitects' Apprentice) system creates a design
                 framework on which increasingly powerful analytical
                 tools are to be grafted. Control flow analyses and
                 program verification tools have shown promise. However,
                 in the realm of the complex systems which interest us
                 there is a great deal of research and development to be
                 done before we can count on the use of such powerful
                 tools. We must always be prepared to resort to
                 experiments for evaluation of proposed designs. This
                 paper describes a fundamental SARA tool, the graph
                 model simulator. During top-down refinement of a
                 design, the simulator is used to test consistency
                 between the levels of abstraction. During composition,
                 known building blocks are linked together and the
                 composite graph model is tested relative to the lowest
                 top-down model. Design of test environments is
                 integrated with the multilevel design process. The SARA
                 methodology is exemplified through design of a higher
                 level building block to do a simple FFT.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Yu:1979:MSD,
  author =       "Stone H. Yu and Tadao Murata",
  title =        "Modeling and simulating data flow computations at
                 machine language level",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "207--213",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805459",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper is concerned with the data flow
                 organization of computers and programs, which exhibits
                 a good deal of inherent concurrency in a computation
                 by imposing no superfluous precedence constraints. In
                 view of the popularity of parallel and distributed
                 processing, this organization can be expected to play
                 an increasingly prominent role in the design and
                 development of computer systems. A schematic diagram
                 called DF-graphs, suitable for modeling data flow
                 computations at the machine language level, is
                 introduced. To facilitate the storage of DF-graphs in
                 computers, matrix equations which fully describe their
                 structure and their dynamic behaviors are developed as
                 an alternate representation. Also demonstrated is the
                 feasibility of simulating the execution of computations
                 specified by DF-graphs on a network of conventional
                 mini- and microprocessors.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Mattheyses:1979:MSA,
  author =       "R. M. Mattheyses and S. E. Conry",
  title =        "Models for specification and analysis of parallel
                 computing systems",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "215--224",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805460",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The problem of designing a properly functioning
                 parallel hardware or software system is considerably
                 more difficult than that of designing a similar
                 sequential system. In this paper we formulate criteria
                 which a design methodology for parallel systems should
                 satisfy and explore the use of various models as the
                 basis for such a design tool.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gertner:1979:PEC,
  author =       "Ilya Gertner",
  title =        "Performance evaluation of communicating processes",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "241--248",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805461",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper concerns the performance evaluation of an
                 operating system based on communicating processes.
                 Processes communicate via messages and there is no
                 shared data. Execution of a program is abstracted as a
                 sequence of events to denote significant computational
                 steps. A finite state machine model of computation is
                 used for the specifications of abstract computational
                 properties and, thereafter, for the selective analysis
                 of measurement data. A set of conventions is developed
                 to characterize the performance of communicating
                 processes. A hierarchical layering technique is used to
                 concisely describe the characteristics of large
                 systems. A performance monitoring system was
                 implemented and applied to the analysis of RIG, a
                 message-based operating system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Spooner:1979:BIS,
  author =       "Christopher R. Spooner",
  title =        "Benchmarking interactive systems: {Producing} the
                 software",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "249--257",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800188.805462",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The author has recently developed a new methodology of
                 benchmarking, which is being applied to a procurement
                 in which (a) a single integrated interactive
                 application is to span a distributed configuration of
                 computing hardware, (b) the configuration is unknown
                 when the benchmark is being developed, and (c) the
                 application software will be written after the
                 benchmark has been run. The buyer prepares a simulation
                 model of the intended application in the form of
                 programs that will run on the hardware being
                 benchmarked. Each competing vendor is expected to tune
                 the performance of this model to the hardware
                 configuration that he has proposed, so he will require
                 several versions of the model. This presents the buyer
                 with a formidable software-production problem, which is
                 further complicated by a requirement for extreme
                 flexibility and reliability. The paper addresses the
                 software-production problem and describes its solution.
                 The solution was to develop an automated
                 code-production system based on two principal design
                 features. First, the model and its translator are both
                 written in the same language; second, the common
                 language is selected on the basis of readability and
                 extensibility. The paper examines why this approach to
                 the code-production problem was successful. Though the
                 code-production system was developed to support a
                 particular benchmarking approach, it should also be
                 useful in other modeling situations. Indeed it might be
                 of interest in any field where readability,
                 reliability, ease of maintenance, and economy of
                 programming effort are considered important.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Dujmovic:1979:CCP,
  author =       "Jozo J. Dujmovi{\'c}",
  title =        "Criteria for computer performance analysis",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "259--267",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805463",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computer evaluation, comparison, and selection is
                 essentially a decision process. The decision making is
                 based on a number of worth indicators, including
                 various computer performance indicators. The
                 performance indicators are obtained through the
                 computer performance measurement procedure.
                 Consequently, this procedure should be completely
                 conditioned by the decision process. This paper
                 investigates various aspects of the computer
                 performance measurement and evaluation procedure
                 within the context of the computer evaluation,
                 comparison, and selection process based on the Logic
                 Scoring of Preference
                 method. The set of elementary criteria for performance
                 evaluation is proposed and the corresponding set of
                 performance indicators is defined. The necessary
                 performance measurements are based on the standardized
                 set of synthetic benchmark programs and include three
                 separate measurements: monoprogramming performance
                 measurement, multiprogramming performance measurement,
                 and multiprogramming efficiency measurement. Using the
                 proposed elementary criteria, the measured performance
                 indicators can be transformed into elementary
                 preferences and aggregated with other non-performance
                 elementary preferences obtained through the evaluation
                 process. The applicability of the presented elementary
                 criteria is illustrated by numerical examples.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  xxauthor =     "Jozo J. Dujomovi{\'c}",
}

@Article{Dyal:1979:SBS,
  author =       "James O. Dyal and William {DeWald, Jr.}",
  title =        "Small business system performance analysis",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "269--275",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805464",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents results from the performance
                 simulation study of a small business-oriented computer
                 system. The system, SPERRY UNIVAC BC/7-700, is
                 commercially available in the configuration modeled and
                 in other higher performance models. All BC/7 systems
                 modeled are supported with highly interactive
                 applications software systems. The model is
                 parameterized to select one or more workstations and
                 one or more cartridge disks. File allocations are by
                 cylinder. Seek times are computed by remembering the
                 position of each movable arm. References are randomized
                 within each file, but the sequence in which files are
                 accessed is controlled by the application logic, in
                 conjunction with the number of line items/order. Most
                 event times are not constant, but the result of drawing
                 randomly against empirical distributions with specified
                 mean and standard deviation. For this study, the system
                 simulated is composed of a single workstation running
                 the highly interactive on-line version of a
                 sophisticated order entry application package.
                 Principal performance measures are system throughput
                 and response time, including operator action times. It
                 is found that, in the single workstation environment,
                 performance is very cost effective in this highly
                 competitive part of the information system market.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Huff:1979:SCR,
  author =       "Robert W. Huff",
  title =        "System characterization of a {Retail Business
                 System}",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "277--284",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805465",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The complexities of Retail Business Systems today
                 require a thorough understanding of how functional
                 requirements impact desired system performance. It is
                 no longer feasible to discretely test and evaluate
                 individual system components without considering their
                 inter-relationship. The techniques described in this
                 presentation will define the method of system
                 characterization of products prior to customer
                 delivery. Three techniques are utilized to characterize
                 system performance --- simulation, stimulation, and
                 performance measurement. Simulation involves writing a
                 mathematical model which is enhanced from a product
                 feasibility model to a system configuration tool as a
                 result of stimulation and measurement activities.
                 Stimulation consists of using emulators to load the
                 system component under test as if the actual system
                 were interconnected. The emulators are programmed to
                 produce a processing volume which can exceed the peak
                 benchmark of the potential user. Performance
                 measurement is accomplished during the stimulation
                 activity using hardware/software probes to monitor
                 specific system parameters. These monitors provide
                 vital information to determine total system capacity
                 and the expected system performance for a given
                 configuration. The information derived from system
                 characterization is invaluable in providing the
                 customer with a realistic expectation of system
                 capability to perform its present functions and in
                 projecting future growth potential.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Stroebel:1979:FPA,
  author =       "Gary Stroebel",
  title =        "Field performance aids for {IBM GSD} systems",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "3",
  pages =        "285--291",
  month =        "Fall",
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1013608.805466",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:53:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A series of field performance aids have been developed
                 to assist IBM Systems Engineers evaluate the
                 performance of System/3, System/34, and System/38
                 configurations. Use of those aids is appropriate at
                 proposal time, for preinstallation design, for tuning,
                 and for upgrade studies. This paper overviews some of
                 the key features of these aids as they pertain to the
                 user interface, workload characterization, and
                 performance models.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Clark:1979:FAP,
  author =       "Jon D. Clark",
  title =        "A feature analysis of performance evaluation texts",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "4",
  pages =        "9--11",
  month =        dec,
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041865",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computer performance analysis, whether it be for
                 design, selection or improvement, has a large body of
                 literature to draw upon. It is surprising, however,
                 that few texts exist on the subject. The purpose of
                 this paper is to provide a feature analysis of the four
                 major texts suitable for professional and academic
                 purposes.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "computer performance evaluation; computer system
                 selection",
}

@Article{Dowdy:1979:SWT,
  author =       "Lawrence W. Dowdy",
  title =        "Synopsis of workshop on the theory and application of
                 analytical models to {ADP} system performance
                 prediction",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "4",
  pages =        "13--17",
  month =        dec,
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041866",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A workshop on the theory and application of analytical
                 models to ADP system performance prediction was held on
                 March 12--13, 1979, at the University of Maryland. The
                 final agenda of the workshop is included as an
                 appendix. Six sessions were conducted: (1) theoretical
                 advances, (2) operational analysis, (3) effectiveness
                 of analytical modeling techniques, (4) validation, (5)
                 case studies and applications, and (6) modeling tools.
                 A summary of each session is presented below. A list of
                 references is provided for more detailed information.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Petrella:1979:SWS,
  author =       "Arthur Petrella and Harold Farrey",
  title =        "Simulating working sets under {MVS}",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "4",
  pages =        "24--36",
  month =        dec,
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041867",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The concept of a `working-set' of a program running in
                 a virtual memory environment is now so familiar that
                 many of us fail to realize just how little we really
                 know about what it is, what it means, and what can be
                 done to make such knowledge actually useful. This
                 follows, perhaps, from the abstract and apparently
                 intangible facade that tends to obscure the meaning of
                 working set. What we cannot measure often ranks high in
                 curiosity value, but ranks low in pragmatic utility.
                 Where we have measures, as in the page-seconds of
                 SMF/MVS, the situation becomes even more curious: here
                 a single number purports to tell us something about the
                 working set of a program, and maybe something about the
                 working sets of other concurrent programs, but not very
                 much about either. This paper describes a case in which
                 the concept of the elusive working set has been
                 encountered in practice, has been intensively analyzed,
                 and finally, has been confronted in its own realm. It
                 has been trapped, wrapped, and, at last, forced to
                 reveal itself for what it really is. It is not a
                 number! Yet it can be measured. And what it is,
                 together with its measures, turns out to be something
                 not only high in curiosity value, but also something
                 very useful as a means to predict the page faulting
                 behavior of a program running in a relatively complex
                 multiprogrammed environment. The information presented
                 here relates to experience gained during the conversion
                 of a discrete event simulation model to a hybrid model
                 which employs analytical techniques to forecast the
                 duration of `steady-state' intervals between mix-change
                 events in the simulation of a network-scheduled job
                 stream processing on a 370/168-3AP under MVS. The
                 specific `encounter' with the concept of working sets
                 came about when an analytical treatment of program
                 paging was incorporated into the model. As a result of
                 considerable luck, ingenuity, and brute-force
                 empiricism, the model won. Several examples of
                 empirically derived characteristic working set
                 functions, together with typical model results, are
                 supported with a discussion of relevant modeling
                 techniques and areas of application.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Pierson:1979:PEM,
  author =       "Daniel L. Pierson",
  title =        "Performance evaluation of a minicomputer-based data
                 collection system",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "4",
  pages =        "37--44",
  month =        dec,
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041868",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper discussed the problems encountered and
                 techniques used in conducting the performance
                 evaluation of a multi-processor on-line manpower data
                 collection system. The two main problems were: (1) a
                 total lack of available software tools, and (2) many
                 commonly used hardware monitor measures (e.g., CPU
                 busy, disk seek in progress) were either meaningless or
                 not available. The main technique used to circumvent
                 these problems was detailed analysis of one-word
                 resolution memory maps. Some additional data collection
                 techniques were (1) time-stamped channel measurements
                 used to derive some system component utilization
                 characteristics and (2) manual stopwatch timings used
                 to identify the system's terminal response times.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Artis:1979:CPM,
  author =       "H. Pat Artis",
  title =        "Capacity planning for {MVS} computer systems",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "4",
  pages =        "45--62",
  month =        dec,
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041869",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The current status of an implementation of a
                 methodology relating load, capacity and service for IBM
                 MVS computer systems is presented. This methodology
                 encompasses systems whose workloads include batch, time
                 sharing and transaction processing. The implementation
                 includes workload classification, mix representation
                 and analysis, automatic benchmarking, and exhaust point
                 forecasting.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Rajaraman:1979:PVM,
  author =       "M. K. Rajaraman",
  title =        "Performance of a virtual memory: some experimental
                 results",
  journal =      j-SIGMETRICS,
  volume =       "8",
  number =       "4",
  pages =        "63--68",
  month =        dec,
  year =         "1979",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041870",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:32 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper reports the results of simulation
                 experiment of a model of a virtual memory computer. The
                 model consists of three major subsystems: Program
                 Behavior, Memory Allocation and Secondary Storage. By
                 adapting existing models of these subsystems an overall
                 model for the computer operation is developed and its
                 performance is tested for various design alternatives.
                 The results are reported for different paging devices,
                 levels of multiprogramming, job mixes, memory
                 allocation scheme, page service scheduling and page
                 replacement rate.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Denning:1980:WWS,
  author =       "Peter J. Denning",
  title =        "What's a working set?",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "6--10",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041873",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "I am writing about the paper by A. Petrella and H.
                 Farrey, of IBM, SIMULATING WORKING SETS UNDER MVS,
                 reprinted in the SIGMETRICS Newsletter, Issue (8, 4),
                 winter 1979-80. The paper is an amalgam of very good
                 modeling work and misinformation about the working set
                 concept. I will summarize the important contributions
                 and give a short essay about working sets.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Petrella:1980:SWS,
  author =       "Arthur Petrella and Harold Farrey",
  title =        "Simulating working sets under {MVS}",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "11--23",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041874",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The concept of a `working-set' of a program running in
                 a virtual memory environment is now so familiar that
                 many of us fail to realize just how little we really
                 know about what it is, what it means, and what can be
                 done to make such knowledge actually useful. This
                 follows, perhaps, from the abstract and apparently
                 intangible facade that tends to obscure the meaning of
                 working set. What we cannot measure often ranks high
                 in curiosity value, but ranks low in pragmatic utility.
                 Where we have measures, as in the page-seconds of
                 SMF/MVS, the situation becomes even more curious: here
                 a single number purports to tell us something about the
                 working set of a program, and maybe something about the
                 working sets of other concurrent programs, but not very
                 much about either. This paper describes a case in which
                 the concept of the elusive working set has been
                 encountered in practice, has been intensively analyzed,
                 and finally, has been confronted in its own realm. It
                 has been trapped, wrapped, and, at last, forced to
                 reveal itself for what it really is. It is not a
                 number! Yet it can be measured. And what it is,
                 together with its measures, turns out to be something
                 not only high in curiosity value, but also something
                 very useful as a means to predict the page faulting
                 behavior of a program running in a relatively complex
                 multiprogrammed environment. The information presented
                 here relates to experience gained during the conversion
                 of a discrete event simulation model to a hybrid model
                 which employs analytical techniques to forecast the
                 duration of `steady-state' intervals between mix-change
                 events in the simulation of a network-scheduled job
                 stream processing on a 370/168-3AP under MVS. The
                 specific `encounter' with the concept of working sets
                 came about when an analytical treatment of program
                 paging was incorporated into the model. As a result of
                 considerable luck, ingenuity, and brute-force
                 empiricism, the model won. Several examples of
                 empirically derived characteristic working set
                 functions, together with typical model results, are
                 supported with a discussion of relevant modeling
                 techniques and areas of application.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Spiegel:1980:MEP,
  author =       "Mitchell G. Spiegel",
  title =        "Measuring and evaluating performance",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "33--34",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041875",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The subject of system performance measurement and
                 evaluation has undergone as many generations of changes
                 as the systems themselves. The problem of what to
                 measure and evaluate is complicated by the fact that
                 computing and communications, having become technically
                 similar (digital), will undergo further fusion. Because
                 the technologies are merging, a comparison of their
                 respective origins is instructive. Communications and
                 computing do not share a common history. Communications
                 performance evaluation began as a turn-of-the-century
                 issue. Important performance attributes of voice
                 communications systems were accessibility and
                 reliability. The general public and communications
                 system analysts always viewed the voice communications
                 systems as a bundled service, with little emphasis on
                 the characteristics of its individual components.
                 Performance was `engineered' into communications
                 systems for given workload capacity levels (traffic). A
                 reliable service offering evolved over two decades
                 (1920's and 1930's) and was expanded to include data as
                 well as voice communications. The voice network used
                 primarily analog transmission techniques, because voice
                 traffic grew far more rapidly than data. Pulse code
                 modulation (PCM) techniques, employing digital
                 transmission, reversed the trend of analog circuitry.
                 In the future, communications transmission, switching,
                 and integrated services networks (voice, data,
                 facsimile, picture) will be implemented exclusively
                 with digital techniques.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Dixon:1980:PMI,
  author =       "P. J. Dixon",
  title =        "Planning {MIS} investment and expense levels",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "35--37",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041876",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Asking for capital for data processing and
                 telecommunications equipment is not exactly popular
                 with most Boards of Directors in most companies.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Moran:1980:CPV,
  author =       "Thomas S. Moran",
  title =        "Capacity planning: `the volume'",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "38--40",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041877",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Some comments on past, present, and future measures of
                 volume as it affects planning for computer systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{DeMarco:1980:BLB,
  author =       "Tom DeMarco",
  title =        "Breaking the language barrier",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "41--45",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041878",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The systems analyst and the user are not independent
                 entities; each depends on the other. When communication
                 problems get in their way, however, the relationship
                 can turn adversary. The real problem in most system or
                 program development efforts may be that English, left
                 to itself, is too subtle, too open to personal
                 interpretation, to be appropriate in the structured
                 world of DP. Tom DeMarco shows how to impose limits on
                 our native language so analysts, designers, programmers
                 and users can safely use it to define what they are
                 trying to develop. This week he starts by giving some
                 hints on that most basic of DP jobs, setting up the
                 system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Giles:1980:CSM,
  author =       "Howard L. Giles",
  title =        "Communications systems management",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "46--51",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041879",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "As data processing systems have grown from primarily
                 batch-oriented applications to today's fairly extensive
                 on-line systems, the management system required to
                 control these resources has changed. This system
                 evolution is forcing management to focus their
                 attention on controlling the distribution of
                 information to various users performing many diverse
                 applications. Communications Systems Management is the
                 process used to manage and control the distribution of
                 information in an on-line system for maximum
                 performance and productivity. It consists of those
                 techniques and tools needed to operate, maintain,
                 repair, install and plan for the continuous operation
                 of a communications-oriented information system. The
                 following pages describe the management functions
                 needed to ensure that on-line system operation will be
                 successful.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Erlandson:1980:SEM,
  author =       "Robert F. Erlandson",
  title =        "System evaluation methodologies: combined
                 multidimensional scaling and ordering techniques",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "52--58",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041880",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "It is a difficult task to evaluate existing
                 large-scale systems; it is even more difficult to
                 evaluate alternative designs for future systems. Yet,
                 such decisions are necessary because of the long
                 development and implementation times involved.
                 Decisions must be made today about future systems for
                 telecommunications, power, health-care delivery,
                 transportation, etc. These systems change slowly
                 because additions or modifications are costly and must
                 mesh with the existing elements; hence, great care must
                 be given to the establishment of long-term goals and
                 the evaluation of alternative future system designs.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Pearson:1980:MCU,
  author =       "Sammy W. Pearson and James E. Bailey",
  title =        "Measurement of computer user satisfaction",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "1",
  pages =        "59--68",
  month =        "Spring",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041872.1041881",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:40 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents the development and evaluation of
                 a questionnaire designed to quantitatively measure
                 computer user satisfaction. The administration,
                 scoring, and interpretation of the questionnaire are
                 also addressed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Chandy:1980:CAP,
  author =       "K. Mani Chandy and Charles H. Sauer",
  title =        "Computational algorithms for product form queueing
                 networks",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "1--1",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806144",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In the last two decades there has been special
                 interest in queueing networks with a product form
                 solution. These have been widely used as models of
                 computer systems and communication networks. Two new
                 computational algorithms for product form networks are
                 presented. A comprehensive treatment of these
                 algorithms and the two important existing algorithms,
                 convolution and mean value analysis, is given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Performance evaluation; Product form; Queueing
                 networks",
}

@Article{Gordon:1980:ICP,
  author =       "Karen D. Gordon and Lawrence W. Dowdy",
  title =        "The impact of certain parameter estimation errors in
                 queueing network models",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "3--9",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806145",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The effect that parameter estimation errors have on
                 performance in closed product form queueing networks is
                 investigated. In particular, the effects of errors in
                 the relative utilization estimates of the servers are
                 analyzed. It is shown that in single class load
                 independent networks, the resulting errors in
                 throughput and utilizations are of approximately the
                 same percentage as the errors in the relative
                 utilization estimates. This result does not hold in
                 networks with load dependent servers or multiple
                 customer classes. The percentage errors in mean queue
                 length depend upon the degree of multiprogramming in
                 the network. Errors in mean queue lengths can become
                 unbounded as the degree of multiprogramming becomes
                 unbounded. Implications of these results to computer
                 system modeling are discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Blake:1980:XIM,
  author =       "Russ Blake",
  title =        "{XRAY}: {Instrumentation} for multiple computers",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "11--25",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806146",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "XRAY presents a global view of the performance of
                 hardware and software components on multiple,
                 distributed computers. The set of components chosen for
                 measurement can be changed at any time throughout a
                 network of systems, and can be selected to minimize
                 data collection time and measurement space. In the
                 course of normal activities the operating system
                 executes firmware which increments counters for the
                 measured components. Periodically, the counters are
                 recorded in an ordinary file by a process in each
                 processor. An analysis program permits browsing through
                 components and plotting counters in real time. Analysis
                 focuses on detecting the distributed sources of
                 excessive activity.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hughes:1980:DDA,
  author =       "James H. Hughes",
  title =        "{DIAMOND} a digital analyzer and monitoring device",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "27--34",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806147",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes the design and application of a
                 special purpose computer system. It was developed as an
                 internal tool by a computer manufacturer, and has been
                 used in solving a variety of measurement problems
                 encountered in computer performance evaluation.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bashioum:1980:BIS,
  author =       "Douglas L. Bashioum",
  title =        "Benchmarking interactive systems: {Calibrating} the
                 model",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "35--41",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806148",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A methodology for benchmarking dedicated, interactive
                 systems has been developed at The MITRE Corporation.
                 This methodology uses a synthetic program model of the
                 application which runs on the proposed
                 hardware/operating system configurations and is driven
                 by a statistically derived load. System performance is
                 measured by analyzing the synthetic transaction
                 response times. The methodology yields assurances to a
                 buyer that the benchmarked system has at least an a
                 priori defined amount of computer power available for
                 applications-oriented software. This paper examines the
                 methodology and the problems that were encountered and
                 solutions which have been used in calibrating a
                 benchmark model for a specific application. The
                 benchmark was designed to model a large interactive
                 information processing application on a procurement
                 requiring loosely-coupled (no shared memory)
                 multicomputer systems. The model consists of a set of
                 interacting synthetic program cells, each composed of
                 several abstractly defined components. The model is
                 maintained in a very high level language that is
                 automatically translated into a standard High Order
                 Language (typically FORTRAN or COBOL) for delivery to
                 the competing vendors. These delivered model cells
                 contain automatically generated size and time filler
                 code that ``calibrates'' the cells to consume the
                 appropriate CPU time and memory space as defined by the
                 abstract size units after accounting for each vendor's
                 hardware and proposed system design.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Benchmark; Calibration; Computer performance
                 measurement; Distributed processing; Interactive
                 systems; Modeling; Real-time; Simulation; Synthetic
                 programs",
}

@Article{Lehmann:1980:PEP,
  author =       "Axel Lehmann",
  title =        "Performance evaluation and prediction of storage
                 hierarchies",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "43--54",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1009375.806149",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper proposes a modelling methodology combining
                 simulation and analysis for computer performance
                 evaluation and prediction. The methodology is based on
                 a special workload model that is suitable for the
                 generation and description of dynamic program
                 behaviour. A description of this workload model is
                 given in section 2. The applicability of this concept
                 with respect to the design of new storage systems, as
                 well as the improvement or comparison of existing
                 systems, will be described by investigation of the
                 efficiency of small cache memories in section 3.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Alanko:1980:MER,
  author =       "Timo O. Alanko and Ilkka J. Haikala and Petri H.
                 Kutvonen",
  title =        "Methodology and empirical results of program behaviour
                 measurements",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "55--66",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806150",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Program behaviour characteristics were examined using
                 data gathered from real program executions. Experiments
                 were performed in a segmented virtual memory with a
                 working set policy; the analysis costs were kept low
                 using an efficient data reduction method. Empirical
                 results were obtained concerning the influence of the
                 window size on program behaviour characteristics, the
                 accuracy of some average working set size
                 approximations and the sensitivity of program behaviour
                 to the program's input data. These results show that
                 some commonly used assumptions concerning program
                 behaviour are inaccurate. Also there seem to exist
                 ``ill-behaving'' programs, the behaviour of which does
                 not correspond well with results obtained earlier. The
                 effects of real-time delays during program execution
                 were considered using a new simple method. As an
                 additional experiment, segmenting and paging were
                 compared using various performance statistics; the
                 results seem to favour segmenting.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kumar:1980:PRB,
  author =       "Gopa Kumar and C. Thomas Nute",
  title =        "Program restructuring for block structured languages",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "67--79",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806151",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Prior studies of program restructuring to increase the
                 degree of locality of a program in a paged virtual
                 memory system were restricted to statically allocated
                 codes only. This work develops a restructuring
                 methodology for block structured languages like Algol,
                 with dynamic memory allocation. We subsequently
                 restructure and analyze different classes of programs
                 using this methodology and study the performance gains
                 realized with different restructuring heuristics.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Vantilborgh:1980:NCD,
  author =       "Hendrik T. Vantilborgh and Richard L. Garner and
                 Edward D. Lazowska",
  title =        "Near-complete decomposability of queueing networks
                 with clusters of strongly interacting servers",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "81--92",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1009375.806152",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The near-complete decomposability of queueing network
                 models of computer systems is generally supported by
                 very large differences in the service rates of the
                 servers. In this paper we show how such models may
                 still be nearly completely decomposable if on the one
                 hand these large differences can no longer be
                 realistically assumed (as is the case, for example, in
                 computer networks) but if on the other hand clusters of
                 strongly interacting servers exist. Our results may be
                 viewed as a bridge between the approaches to the
                 approximate analysis of queueing networks advanced by
                 Courtois and by Chandy, Herzog and Woo, since we show
                 circumstances under which the former approach leads to
                 exactly the same method of analysis as the latter. In
                 contrast to the Chandy, Herzog and Woo theorem,
                 however, the theory of near-complete decomposability
                 does not rely on the beneficent properties of queueing
                 networks exhibiting product form solutions. Thus our
                 results may point the way towards the theoretically
                 sound application of simple and intuitively appealing
                 approximate analysis techniques to non-product-form
                 networks.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Brandwajn:1980:FRE,
  author =       "Alexandre Brandwajn",
  title =        "Further results on equivalence and decomposition in
                 queueing network models",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "93--104",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806153",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper addresses three aspects related to the
                 notion of exact equivalence in queueing models. In many
                 cases the parameters of a system equivalent to a given
                 model involve only a small subset of conditional
                 probabilities of the state of the original model given
                 the equivalent one. It is shown that meaningful bounds
                 may be obtained for the conditional probabilities of
                 interest with little computational effort. Such bounds
                 are useful in assessing processing capacities as well
                 as the accuracy of approximate solutions. As a second
                 point it is shown that the notion of exact equivalence
                 may be easily extended to networks with non-exponential
                 servers. This is done both for the method of
                 supplementary variables and for the embedded Markov
                 chain technique. Qualitative analysis of approximation
                 methods is also discussed. Finally, numerical methods
                 based on the notion of exact equivalence, i.e.
                 operating on conditional probabilities, are
                 considered.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Stewart:1980:ECF,
  author =       "William J. Stewart and Gerald A. Zeiszler",
  title =        "On the existence of composite flow equivalent
                 {Markovian} servers",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "105--116",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1009375.806154",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Queueing networks have been used to model a large
                 variety of complex systems. However, once a realistic
                 model has been constructed it has generally been
                 necessary to distort and modify it so that an analytic
                 solution could be obtained. Unfortunately, the analytic
                 solution often has little relation to the original
                 queueing system and consequently often produces
                 solutions with poor accuracy. We begin with a brief
                 introduction to the concepts of decomposition and
                 aggregation. Applications of these and other approximate
                 methods to the analysis of computer systems are
                 discussed by Chandy and Sauer [CHAN78].",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Marie:1980:CEP,
  author =       "Raymond Marie",
  title =        "Calculating equilibrium probabilities for {$ \lambda
                 (n) / C_k / 1 / N $} queues",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "117--125",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806155",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Equilibrium state distributions are determined for
                 queues with load-dependent Poisson arrivals and service
                 time distributions representable by Cox's generalized
                 method of stages. The solution is obtained by
                 identifying a birth-death process that has the same
                 equilibrium state distribution as the original queue.
                 Special cases of two-stage ($ C_2 $) and Erlang-$k$ ($ E_k $)
                 service processes permit particularly efficient
                 algorithms for calculating the load-dependent service
                 rates of the birth-death process corresponding to the
                 original queue. Knowing the parameters of the
                 birth-death process, the equilibrium state
                 probabilities can be calculated straightforwardly.
                 This technique is particularly useful when subsystems
                 are reduced to flow-equivalent servers representing the
                 complementary network.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Wagner:1980:HCS,
  author =       "Robert A. Wagner and Kishor S. Trivedi",
  title =        "Hardware configuration selection through discretizing
                 a continuous variable solution",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "127--142",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806156",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper extends a previous model for computer
                 system configuration planning developed by the authors.
                 The problem is to optimally select the CPU speed, the
                 device capacities, and file assignments so as to
                 maximize throughput subject to a fixed cost constraint.
                 We advocate solving this essentially discrete problem
                 in continuous variables followed by an appropriate
                 discretization. The discretization error thus committed
                 is analyzed in detail.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bard:1980:MSD,
  author =       "Yonathan Bard",
  title =        "A model of shared {DASD} and multipathing",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "143--143",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806157",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents a model of an I/O subsystem in
                 which devices can be accessed from multiple CPUs and/or
                 via alternative channel and control unit paths. The
                 model estimates access response times, given access
                 rates for all CPU-device combinations. The systems
                 treated are those having the IBM System/370
                 architecture, with each path consisting of a CPU,
                 channel, control unit, head of string, and device with
                 rotational position sensing. The path selected for an
                 access at seek initiation time remains in effect for
                 the entire channel program. The computation proceeds in
                 three stages: First, the feasibility of the prescribed
                 access rates is determined by solving a linear
                 programming problem. Second, the splitting of access
                 rates among the available paths is determined so as to
                 satisfy the following principle: The probability of
                 selecting a given path is proportional to the
                 probability that the path is free. This condition leads
                 to a set of nonlinear equations, which can be solved by
                 means of the Newton--Raphson method. Third, the RPS hit
                 probability, i.e. the probability that the path is free
                 when the device is ready to transmit, is computed in
                 the following manner: From the point of view of the
                 selected path, the system may be viewed as being in one
                 of 25 possible states. There are twelve different
                 subsets of states whose aggregate probabilities can be
                 computed from the (by now) known flow rates over the
                 various paths. The maximum entropy principle is used to
                 calculate the unknown state probabilities, with the
                 known aggregate probabilities acting as constraints.
                 The required RPS hit probability can be computed easily
                 once the state probabilities have been determined.
                 Explicit formulas are given for all these quantities.
                 Empirically derived formulas are used to compute the
                 RPS miss probability on subsequent revolutions, given
                 the probability on the first revolution. The model is
                 validated against a simulator, showing excellent
                 agreement for systems with path utilizations up to 50
                 percent. The model is also validated against
                 measurements from a real three-CPU system with 31
                 shared devices. In this validation, the I/O subsystem
                 model acts as a common submodel to three copies of a
                 system model, one for each CPU. Estimated end-user
                 transaction response times show excellent agreement
                 with the live measurements.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lo:1980:CCP,
  author =       "T. L. Lo",
  title =        "Computer capacity planning using queueing network
                 models",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "145--152",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806158",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents several computer capacity planning
                 case studies using a modeling tool, BEST/1, derived
                 from the theory of queueing networks. All performance
                 predictions were evaluated based on the selected
                 service levels such as response times and throughputs.
                 Advantages and disadvantages of using the modeling
                 approach are also briefly discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kurinckx:1980:OVC,
  author =       "A. Kurinckx and G. Pujolle",
  title =        "Overallocation in a virtual circuit computer network",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "153--158",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806159",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper, we study the end-to-end control through
                 virtual circuits in a computer network built following
                 the X.25 Recommendations. We develop a mathematical
                 model to obtain the maximum overallocation of node
                 buffers, in order for the probability of overflow not
                 to exceed a given value.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Upton:1980:ADA,
  author =       "Richard A. Upton and Satish K. Tripathi",
  title =        "Analysis of design alternatives for a packet switched
                 {I/O} system",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "159--171",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806160",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes an application of analytical
                 modeling to the design and evaluation of a general
                 purpose, packet-switched image processing system that
                 will soon enter an implementation phase. A bottom-up
                 modeling approach is used to evaluate such design
                 issues as optimal packet size, optimal channel access
                 method(s), and required number of processors and disks.
                 Based on the characteristics of various hardware
                 components and the predicted workload, specific design
                 recommendations are made.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Balkovich:1980:PDS,
  author =       "Edward E. Balkovich and Colin Whitby-Strevens",
  title =        "On the performance of decentralized software",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "173--180",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806161",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Distribution of computing to achieve goals such as
                 enhanced reliability depends on the use of decentralized
                 software. Decentralization typically replaces a
                 sequential process by a system of small, concurrent
                 processes that interact frequently. The implementation
                 of processes and their interactions represents a cost
                 incurred as a result of decentralization. Performance
                 measurements are reported in this paper for
                 decentralized software written in a programming
                 language for distributed computer systems. These
                 performance measurements confirm that low-cost
                 implementations of concurrency are possible, but
                 indicate that decentralized software makes heavy use of
                 run-time functions managing concurrency. An initial
                 model comparing the performance of a specific
                 decentralized software structure to its centralized
                 counterpart indicates that these implementation costs
                 are generally offset by the performance improvements
                 that are due to the parallelism inherent in the
                 decentralized structure. The research facilities for
                 continued study of decentralized software performance
                 are described in the summary.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Concurrent software; Decentralized control;
                 Decentralized software; Distributed computer systems;
                 Performance measurement and evaluation",
}

@Article{Grit:1980:PMA,
  author =       "Dale H. Grit and Rex L. Page",
  title =        "Performance of a multiprocessor for applicative
                 programs",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "181--189",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806162",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Applicative programming languages provide
                 opportunities for parallel processing without requiring
                 the programmer to be concerned with explicit
                 synchronization of portions of the computation. We
                 present a computational model of a multiprocessor which
                 executes applicative programs, and we analyze the
                 expected performance of the model via simulation. As
                 the number of processors is doubled, elapsed execution
                 time is nearly halved, until system bottlenecks occur.
                 An alternative model is proposed which alleviates these
                 bottlenecks. The basis of the second model is an
                 interconnection switch which is characterized by $ \log
                 (n) $ access time and $ n \log (n) $ cost.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Dhas:1980:PEF,
  author =       "C. Retna Dhas",
  title =        "Performance evaluation of a feedback data flow
                 processor using simulation",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "191--197",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806163",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents a method to estimate the
                 performance of a feedback data flow processor using
                 software simulation. A brief overview of a data flow
                 language and a data flow processor, along with the
                 conceptual view of a software simulator, is described.
                 Numerical measurements of parallelism and resource
                 requirements are obtained by translating high-level
                 language programs to data flow language and then
                 executing them on the simulator.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bryant:1980:HMG,
  author =       "Raymond M. Bryant",
  title =        "On homogeneity in {M\slash G\slash 1} queueing
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "199--208",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806164",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Operational analysis replaces certain classical
                 queueing theory assumptions with the condition of
                 ``homogeneous service times.'' In this paper, we show
                 that the sample paths of an M/G/1 queueing system have
                 this property with non-zero probability if and only if
                 the service time distribution is exponential. We also
                 consider the relationship between the operational
                 performance measure $ S(n) $ and the mean service time.
                 This relationship is shown to depend on the form of the
                 service distribution. It follows that using operational
                 analysis to predict the performance of an M/G/1
                 queueing system when the mean service time is changed
                 will be most successful when the service time
                 distribution is exponential. Simulation evidence is
                 presented which supports this claim.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Coffman:1980:ORP,
  author =       "E. G. {Coffman, Jr.} and Erol Gelenbe and Roger C.
                 Wood",
  title =        "Optimal replication of parallel-read, sequential-write
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "209--216",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806165",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Frequently used computer elements that can be written
                 by at most one process at a time constitute important
                 bottlenecks in multiprocessor system operation,
                 particularly when such elements are accessible only
                 serially. Hardware devices, data files, system tables
                 and critical sections in general may be examples of
                 such elements. One common way to relieve this
                 congestion is to provide several copies of the element,
                 which can then be read (used) in parallel. However, the
                 requirement that writing (changing) remain sequential
                 means that writing times increase with the number of
                 copies provided. The optimization question in this
                 trade-off is the main concern of this paper. A
                 probability model of such a system is formulated with
                 the objective of obtaining read-rate capacities as a
                 function of read/write loads and the number of copies
                 provided. The above optimization problem is expressed
                 in terms of these results and then solved. In
                 particular, it is shown how to select the number of
                 copies that maximizes the read-rate capacity for given
                 system parameters. Two distinct operating regimes,
                 based on how interrupted read operations are restarted,
                 are analyzed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Shore:1980:LRO,
  author =       "John E. Shore",
  title =        "The lazy repairman and other models: {Performance}
                 collapse due to overhead in simple, single-server
                 queuing systems",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "217--224",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806166",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider two simple models of overhead in batch
                 computer systems and demand access communications
                 systems. The first, termed ``modified M/M/1/K,'' is an
                 exponential, single-server queuing system with finite
                 storage capacity, constant arrival rate, and
                 queue-length-dependent service time. We consider cases
                 in which the expected service time consists of a
                 constant plus a term that grows linearly or
                 logarithmically with the queue length. We show that the
                 performance of this system --- as characterized by the
                 expected number of customers in the system, the
                 expected time in the system, and the rate of missed
                 customers --- can collapse as the result of small
                 changes in the arrival rate, the overhead rate, or the
                 queue capacity. The system has the interesting property
                 that increasing the queue capacity can decrease
                 performance. In addition to equilibrium results, we
                 consider the dynamic behavior of the model. We show
                 that the system tends to operate in either of two
                 quasi-stable modes of operation --- one with low queue
                 lengths and one with high queue lengths. System
                 behavior is characterized by long periods of operation
                 in both modes with abrupt transitions between them. We
                 point out that the performance of a saturated system
                 may be improved by dynamic operating procedures that
                 return the system to the low mode. In the second model,
                 termed the ``lazy repairman,'' the single server has
                 two distinct states: the ``busy'' state and the
                 ``lazy'' state. Customers receive service only when the
                 server is in the busy state; overhead is modeled by
                 attributing time spent in the lazy state to overhead
                 functions. When the expected time spent in the lazy
                 state increases with the number of customers waiting
                 for service, the behavior of the lazy repairman model
                 is similar to the modified M/M/1/K, although the lazy
                 repairman model makes it easier to study in detail the
                 effects of overhead.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lam:1980:RTD,
  author =       "Simon S. Lam and A. Udaya Shankar",
  title =        "Response time distributions for a multi-class queue
                 with feedback",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "225--234",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806167",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A single server queue with feedback and multiple
                 customer classes is analyzed. Arrival processes are
                 independent Poisson processes. Each round of service is
                 exponentially distributed. After receiving a round of
                 service, a customer may depart or rejoin the end of the
                 queue for more service. The number of rounds of service
                 required by a customer is a random variable with a
                 general distribution. Our main contribution is
                 characterization of response time distributions for the
                 customer classes. Our results generalize in some
                 respects previous analyses of processor-sharing models.
                 They also represent initial efforts to understand
                 response time behavior along paths with loops in
                 locally balanced queueing networks.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Wang:1980:AIO,
  author =       "Y. T. Wang",
  title =        "Analysis of an intrinsic overload control for a class
                 of queueing systems",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "235--243",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806168",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider a priority queueing system which consists
                 of two queues sharing a processor and in which there is
                 delayed feedback. Such a model arises from systems
                 which employ a priority assignment scheme to achieve
                 overload control. An analytic expression for the
                 stationary probability of the queue lengths is derived.
                 An algorithm is proposed to compute the queue length
                 distribution. Some numerical results are presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Smith:1980:ASD,
  author =       "Connie Smith and J. C. Browne",
  title =        "Aspects of software design analysis: {Concurrency} and
                 blocking",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "245--253",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1009375.806169",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper extends previous work on development of a
                 methodology for the prediction of the performance of
                 computer software systems from design level
                 specifications and continuing through implementation.
                 The effects of synchronized behavior, such as results
                 from data reservation in multi-thread executions of
                 data base systems, and competition for host system
                 resources are incorporated. The previous methodology
                 uses hierarchical graphs to represent the execution of
                 software on some host computer system (or on some
                 abstract machine). Performance metrics such as response
                 time were obtained from analysis of these graphs
                 assuming execution of a single copy on a dedicated
                 host. This paper discusses the mapping of these
                 execution graphs upon queueing network models of the
                 host computing environment to yield performance metric
                 estimates for more complex and realistic processing
                 environments.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Potier:1980:ALP,
  author =       "D. Potier and Ph. Leblanc",
  title =        "Analysis of locking policies in database management
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "255--255",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1009375.806170",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Quantitative analysis of locking mechanisms and of
                 their impact on the performance of transactional
                 systems has so far received relatively little
                 attention. Although numerous concurrency mechanisms
                 have been proposed and implemented, there is an obvious
                 lack of experimental as well as analytical studies of
                 their behaviour and their influence on system
                 performance. We present in this paper an analytical
                 framework for the performance analysis of locking
                 mechanisms in transactional systems based on
                 hierarchical analytical modelling. Three levels of
                 modelling are considered: at level 1, the different
                 stages (lock request, execution, blocking) transactions
                 go through during their lifetime are described; the
                 organization and operations of the CPU and I/O
                 resources are analysed at level 2; transactions'
                 behaviour during their lock request phase is analysed
                 at modelling level 3. This hierarchical approach is
                 applied to the analysis of a physical locking scheme
                 involving a static lock acquisition policy. A simple
                 probabilistic model of transaction behaviour is used to
                 derive the probability that a new transaction is
                 granted the locks it requests, given the number of
                 transactions already active, as a function of the
                 granularity of the database. On the other hand, the
                 multiprogramming effect due to the sharing of CPU and
                 I/O resources by transactions is analysed using
                 standard queueing network approaches and the solution
                 package QNAP. In a final step, the results on the
                 blocking probabilities and the multiprogramming effect
                 are used as input to a global performance model of the
                 transactional system. Markovian analysis is used to
                 solve this model and to obtain the throughput of the
                 system as a function of the database granularity and
                 other parameters. The results obtained provide a clear
                 understanding of the various factors which determine
                 the global performance, and of their role and
                 importance. They also raise many new issues which can
                 only be resolved by further extensive experimental and
                 analytical studies, and show that two particular topics
                 deserve special attention: the modelling of transaction
                 behaviour and the modelling of locking overheads.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Coffman:1980:ONC,
  author =       "E. G. {Coffman, Jr.} and E. Gelenbe and B. Plateau",
  title =        "Optimization of the number of copies in a distributed
                 data base",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "257--263",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806171",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider the effect on system performance of the
                 distribution of a data base in the form of multiple
                 copies at distinct sites. The purpose of our analysis
                 is to determine the gain in READ throughput that can be
                 obtained in the presence of consistency preserving
                 algorithms that have to be implemented when UPDATE
                 operations are carried out on each copy. We show that
                 READ throughput diminishes if the number of copies
                 exceeds an optimal value. The theoretical model we
                 develop is applied to a system in which consistency is
                 preserved through the use of Ellis's ring algorithm.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ruschitzka:1980:RJC,
  author =       "Manfred Ruschitzka",
  title =        "The response of job classes with distinct policy
                 functions ({Extended Abstract})",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "265--265",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806172",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Policy function schedulers provide a flexible
                 framework for implementing a wide range of different
                 scheduling schemes. In such schedulers, the priority of
                 a job at any instant in time is defined by the
                 difference between the time it spent in the system and
                 an arbitrary function of its attained service time. The
                 latter is called the policy function and acts as the
                 functional parameter that specifies a particular
                 scheduling scheme. For instance, a constant policy
                 function specifies the first-come, first-serve
                 scheduling scheme. By changing the policy function, the
                 system behavior can be adjusted to better conform with
                 desired response characteristics. It is common to
                 express response characteristics in terms of a response
                 function, the average response time of a job
                 conditioned on its service requirement in equilibrium.
                 In this paper, we analyze processor-sharing M/G/1
                 systems in which the priorities of different classes of
                 jobs are determined by distinct policy functions.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kim:1980:PTO,
  author =       "K. H. Kim and Mahmoud Naghibzadeh",
  title =        "Prevention of task overruns in real-time
                 non-preemptive multiprogramming systems",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "267--276",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1009375.806173",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Real-time multiprogramming systems, in which a
                 hardware processor is dynamically assigned to run
                 multiple software processes each designed to control an
                 important device (user), are considered. Each software
                 process executes a task in response to a service
                 request repeatedly coming from the corresponding user.
                 Each service task is associated with a strict deadline,
                 and thus the design problem that we are concerned with
                 is to ensure that the service tasks requested can
                 always be executed within the associated deadlines,
                 i.e., no task overrun occurs. This problem was studied
                 by several investigators for the cases where preemptive
                 scheduling strategies are used. In contrast, very few
                 studies have been conducted for cases of non-preemptive
                 scheduling. In this paper we show that a non-preemptive
                 strategy, called relative urgency non-preemptive (RUNP)
                 strategy, is optimal in the sense that if a system runs
                 without a task overrun under any non-preemptive
                 strategy, it will also run without a task overrun under
                 the RUNP strategy. Then an efficient procedure used at
                 the design time for detecting the possibility of a task
                 overrun in a system using the RUNP strategy is
                 presented. The procedure is useful in designing
                 overrun-free real-time multiprogramming systems that
                 yield high processor utilizations. Some special types
                 of systems using the RUNP strategy for which even
                 simpler detection procedures are available are also
                 discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Non-preemptive scheduling; Real-time multiprogramming;
                 Relative urgency; Task overrun; Time critical process",
}

@Article{King:1980:NMI,
  author =       "P. J. B. King and I. Mitrani",
  title =        "Numerical methods for infinite {Markov} processes",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "277--282",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806174",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The estimation of steady state probability
                 distributions of discrete Markov processes with
                 infinite state spaces by numerical methods is
                 investigated. The aim is to find a method applicable to
                 a wide class of problems with a minimum of prior
                 analysis. A general method of numbering discrete states
                 in infinite domains is developed and used to map the
                 discrete state spaces of Markov processes into the
                 positive integers, for the purpose of applying standard
                 numerical techniques. A method based on a little-used
                 theoretical result is proposed and is compared with two
                 other algorithms previously used for finite state space
                 Markov processes.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Fayolle:1980:SCT,
  author =       "G. Fayolle and P. J. B. King and I. Mitrani",
  title =        "The solution of certain two-dimensional {Markov}
                 models",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "2",
  pages =        "283--289",
  month =        "Summer",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800199.806175",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:54:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A class of two-dimensional Birth-and-Death processes,
                 with applications in many modelling problems, is
                 defined and analysed in the steady-state. These are
                 processes whose instantaneous transition rates are
                 state-dependent in a restricted way. Generating
                 functions for the steady-state distribution are
                 obtained by solving a functional equation in two
                 variables. That solution method lends itself readily to
                 numerical implementation. Some aspects of the numerical
                 solution are discussed, using a particular model as an
                 example.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Clark:1980:EIE,
  author =       "Jon D. Clark and Robert M. Golladay",
  title =        "Empirical investigation of the effectiveness of
                 several computer performance evaluation tools",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "3",
  pages =        "31--36",
  month =        "Fall",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041883.1041884",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:55:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A number of tools exist for computer selection
                 evaluation. The operational cost of applying these
                 varies considerably, as does the precision of the
                 performance
                 prediction. This paper compares the precision of
                 several commonly used methods in a single test case,
                 namely cycle time, instruction mix analysis and
                 benchmarking.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "benchmark; computer; cycle time; instruction mix;
                 performance evaluation",
}

@Article{Estell:1980:BW,
  author =       "Robert G. Estell",
  title =        "Benchmarks and watermarks",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "3",
  pages =        "39--44",
  month =        "Fall",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041883.1041885",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:55:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Historically, benchmark tests have been one of several
                 ways to size a computer system, and measure its
                 performance. Today, it is more important to test the
                 System Software than the machine hardware. (Thus the
                 term `watermark' (as on bond paper) for software
                 tests.) Watermarks of software suffer the same
                 limitations and risks as benchmarks of hardware: e.g.,
                 they should be supplemented with simulations, models,
                 and other analysis and design tools of our trade.
                 Perhaps most significantly, watermarks, like
                 benchmarks, can be biased by their creators.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kleijnen:1980:SMM,
  author =       "J. P. C. Kleijnen",
  title =        "Scoring methods, multiple criteria, and utility
                 analysis",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "3",
  pages =        "45--56",
  month =        "Fall",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041883.1041886",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:55:53 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Scoring methods are popular in computer selection, and
                 try to combine different attributes into an overall
                 performance measure. Related is the multi-criteria
                 evaluation of computerized information systems. The
                 scoring method is criticized in the context of more
                 general utility models, popular in economics. Scoring
                 provides simplistic choice models, and should not be
                 used as predictive, causal models. Many references for
                 further study are included.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Denning:1980:TTI,
  author =       "Peter J. Denning",
  title =        "A tale of two islands: a fable",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "4",
  pages =        "7--10",
  month =        "Winter",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041888.1041889",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:55:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Once upon a time there were two islands. One was
                 called Stochasia. Its citizens were well cultured and
                 they had achieved high development in a system of
                 mathematics for random processes. The other island was
                 called Operatia. Its citizens were well cultured and
                 they had achieved high development in a system for
                 experimentation with nondeterminate phenomena. Both
                 civilizations were closed societies. Neither knew of
                 the other's existence, and it had been so since the
                 beginning of time. Neither would ever have known, had
                 it not been for the events I will describe shortly.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Yader:1980:ACP,
  author =       "Mark J. Yader",
  title =        "{ADP} capacity planning: a case study",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "4",
  pages =        "11--25",
  month =        "Winter",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041888.1041890",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:55:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A case study of short-range ADP capacity planning is
                 presented and related to the process of long-range
                 planning. Short-range capacity planning is concerned
                 with identification of computer and communication
                 resources which will reach saturation levels in the
                 near future. The initial step in the short-range
                 planning process is to evaluate the performance of the
                 user's current system configuration and one or more
                 configuration enhancements with respect to their
                 effectiveness in supporting a projected workload.
                 Central to long-range planning is the evaluation of a
                 broader range of architectural alternatives, including
                 various distributed processing designs. In both
                 short-range and long-range planning, system modeling is a
                 basic tool for evaluating alternatives. An analytic
                 network of queues model has been developed to reflect
                 both centralized and hierarchically distributed network
                 architectures. The application of the tool as part of
                 the short-range case study is described.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Marrevee:1980:HFF,
  author =       "J. Marrev{\'e}e",
  title =        "How friendly and fast is {FAST DUMP RESTORE}",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "4",
  pages =        "28--35",
  month =        "Winter",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041888.1041891",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:55:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "FAST DUMP RESTORE, shortly named FDR, is a very well
                 known software package, delivered by the software house
                 Innovation Data Processing, and in some countries of
                 Europe commercially supported by Westinghouse. This
                 package is used in many computer centres using one of
                 IBM's big operating systems e.g. MVT or MVS. According
                 to Innovation's own remarks it became one of the most
                 successful software products in the world with about
                 3000 users, and since 1974 it is every year on the
                 DATAPRO HONOR ROLL. It should, among others, provide
                 superior performance on creation of dumps or restores
                 of disk packs.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bernard:1980:SUM,
  author =       "J. C. Bernard",
  title =        "{T-scan}: the use of micro computers for response time
                 measurements",
  journal =      j-SIGMETRICS,
  volume =       "9",
  number =       "4",
  pages =        "39--50",
  month =        "Winter",
  year =         "1980",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041888.1041892",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:55:57 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "All large computer centers are actually faced with a
                 major change in their workload. Most applications are
                 leaving batch operations for time sharing ease of use.
                 In fact, all kinds of computer work could be performed
                 through a console: development, maintenance, data base
                 query and update and even batch control and submit. A
                 second problem arises as end-user profile is no more
                 computer oriented. Users only look at the time the
                 system needs to answer their requests, and don't care
                 about the computer game. So performance analysts and
                 operations managers are supposed to achieve a certain
                 level of service which they are almost unable to
                 measure. We try in this paper to discuss some major
                 problems related to conversational computer operations.
                 We will present several drawbacks characterising the
                 currently existing solutions. A problem that lead us to
                 define simple operating principle for response time
                 measurements. This principle is implemented in a fully
                 automatic measurement tool named T-SC",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bell:1981:SLC,
  author =       "Thomas E. Bell",
  title =        "Structured life-cycle assumptions",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "1--3",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807901",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "New programmers, some managers, and lots of users
                 don't understand the advantages of a structured
                 software life-cycle. However, only a single experience
                 with coding while designing will convince any incipient
                 software engineer that a controlled process is needed
                 from the time of system concept through the last
                 maintenance phase. Software Configuration Management
                 has become almost a religion, and EDP auditors have
                 even encountered a few systems that appear to have been
                 specified, then designed, then implemented, then
                 tested, and finally installed --- all before
                 maintenance and redefinition occurred. Perhaps the
                 millennium has finally arrived, and software people
                 will soon live in a controlled world with rational
                 practices. If you are tempted to believe the foregoing
                 prediction, read the latest issue of FORTUNE, the WALL
                 STREET JOURNAL, or COMMERCE BUSINESS DAILY and note a
                 few problems that may divert us from the path to
                 Nirvana. Data Processing supports commercial,
                 educational, industrial, and governmental activities
                 that are frequently (and repeatedly) redirected. Under
                 circumstances of a largely random environment with
                 thorough business planning a rarity, a critical support
                 activity can expect to be redirected frequently. New
                 ideas will be sliced into partly-completed DP
                 projects, and users' ``analytical analyses'' will become
                 DP systems as if by magic.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Coughlin:1981:SDM,
  author =       "Donald T. Coughlin",
  title =        "System development methodology or system research
                 methodology?",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "5--6",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807902",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A future data processing historian may someday point
                 to the 1970s as the decade when business application
                 systems began their adolescent growth period. We
                 entered the 1970s with few truly on-line business
                 systems, and many application designers did not fully
                 appreciate the capabilities and limitations of index
                 sequential file structures. Many of the larger
                 companies were busy writing their own tp monitors and
                 file handling systems, and it is very possible that
                 more professional hours were being devoted to the
                 development of control program software than to
                 applications software. The last decade did provide the
                 application programmer with new control program tools
                 such as data base management systems and on-line
                 terminal control software. It also generated a
                 continuing demand for computer performance software
                 specialists to tune application systems immediately
                 after initial implementation. These performance tuning
                 efforts often required substantial changes to the
                 application system --- not just program code but also
                 basic redesign. Therefore, were these really system
                 development projects or were they system research
                 projects?",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Earle:1981:AAB,
  author =       "Dennis M. Earle",
  title =        "An alchemical approach to brokerage",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "7--8",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807903",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The essence of the commodities business is the ability
                 to react quickly to evolving market conditions.
                 Mocatta, a N.Y. based bullion dealer, is a firm which
                 uses its Data Processing to provide both front office
                 (trading) flexibility and back-office capacity to
                 handle large volume days. The business is characterized
                 by the constant trade-off of time against money.
                 Corporate philosophy is to spend money to react quickly
                 rather than to react slowly but perhaps at lower costs.
                 The life cycle of a system in this environment normally
                 begins with a marketing report reflecting a new market
                 niche which the firm can take advantage of. Data
                 Processing is involved almost from the inception of the
                 idea to provide an indication of what existing systems
                 can do for this new opportunity. Because of the nature
                 of the business, each new product offered is usually so
                 unique as to make it impossible for existing systems to
                 support a new product from a trading point of view.
                 Back-office applications are somewhat more common
                 across products, so existing systems can usually
                 provide some support. The key point is that all we
                 really know is that we want to market the new product.
                 Some idea of the time frame in which the product is to
                 be offered is also obtained. The exact workings of
                 defining the product and determining the parameters
                 under which it will be traded usually remain to be
                 worked out prior to the offering date. This therefore
                 means that we have, at the point of commitment, the
                 necessity for evolving data processing support in the
                 same time frame in which the definition is evolving
                 about what it is that we are to support.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Spiegel:1981:PAI,
  author =       "Mitchell G. Spiegel",
  title =        "Prototyping: an approach to information and
                 communication system design",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "9--19",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807904",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes prototyping, a state-of-the-art
                 methodology to assist a design team in making a thorough
                 definition and analysis of new requirements,
                 feasibility, alternative selections, workload impact,
                 system and/or application specification,
                 implementation, and testing. Suggested prototype tools
                 and techniques are presented, and guidance is included
                 to aid a design team in obtaining accurate and timely
                 results. This paper is not intended to be a complete
                 text on design. It should be enhanced with a design
                 team's expertise, consultation from sources with design
                 experience, and reference to other design literature.
                 Prototyping is a process (the act, study, or skill) of
                 modeling an information-communication system
                 architecture in one or more levels of detail, using
                 descriptive models, abstract models, and working models
                 of the system and its component parts (synonym:
                 archetyping). This work was completed while the author
                 was working with prior employers.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Jenkins:1981:APC,
  author =       "C. Wesley Jenkins",
  title =        "Application prototyping: a case study",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "21--27",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807905",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Accurate specification of user requirements for
                 interactive systems is especially difficult in an
                 environment where the demand for information is
                 intense, short-fused and largely unpredictable. The
                 Congressional Budget Office was created in 1975 by an
                 Act of Congress. Its primary mandate is to serve the
                 Budget and Appropriation committees of both the Senate
                 and the House of Representatives. The Act also defined
                 a Congressional Budget process specifying a calendar of
                 events and specific completion dates for major
                 activities. This pacing of budgetary actions produces
                 a highly charged environment in which CBO must be able
                 to respond immediately to information needs with
                 information that is both accurate and consistent.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Cox:1981:SRT,
  author =       "Patricia R. Cox",
  title =        "Specification of a regression test for a mini computer
                 operating system",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "29--32",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807906",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper I describe the practical problems of
                 designing a regression test set for an existing
                 mini-computer operating system. The ideal regression
                 test would test each function with all possible
                 combinations of the options for each variation of the
                 operating system. This is impractical if not impossible
                 so the alternative is to choose the individual cases
                 for maximum coverage. To do that the system is viewed
                 both functionally and structurally and cases are
                 selected for inclusion in the test set. The method of
                 selecting the tests is described along with the tools
                 that will be needed to measure the coverage and to
                 maintain the test set.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bryan:1981:ASC,
  author =       "William Bryan and Stanley Siegel and Gary
                 Whiteleather",
  title =        "An approach to software configuration control",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "33--47",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807907",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The purpose of this paper is to discuss the process by
                 which a system's life cycle and its associated life
                 cycle products are managed to ensure the quality and
                 integrity of the system. We call this process
                 configuration control. Although many of the ideas in
                 this paper are applicable to systems in general, the
                 focus of this paper is on configuration control of
                 systems with software content. It is becoming apparent
                 to many, in both government and private industry, that
                 the high cost of maintenance of existing computer
                 systems may be attributed to poor configuration control
                 early in the system's life cycle. For example, in an
                 article entitled `A Corporate Road Map for Systems
                 Development in the '80s', the following claim appears.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Fredrick:1981:PIS,
  author =       "C. R. Fredrick",
  title =        "Project implementation of {Software Configuration
                 Management}",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "49--56",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807908",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Have you or one of your programmers said: ``The system
                 ran yesterday; I only changed one line.'' or ``I spent
                 my budget, but I'm not done.'' or ``I fixed that
                 problem yesterday, but it's back now.'' or ``I thought
                 it would be a nice feature for the operator, so I added
                 it to the program.'' or ``Why was this line of code
                 changed? Who did it and when?''? If these or other
                 similar statements are familiar, then Software
                 Configuration Management is a subject that should
                 interest you. Software Configuration Management (SCM)
                 is a management method that establishes a discipline
                 for the software development process and provides
                 visibility to that process. The step by step procedures
                 used by a large software organization to resolve some
                 of their development problems will be followed here.
                 The result of their efforts was the formulation of a
                 management method that significantly improved the
                 quality of their software products and reduced the
                 costs. It was learned later that other software
                 organizations had gone through similar processes and
                 arrived at similar results. This new tool is now known
                 as Software Configuration Management.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Berlack:1981:ISC,
  author =       "H. Ronald Berlack",
  title =        "Implementing software configuration control in the
                 structured programming environment",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "57--77",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807909",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The fundamental problems in the control of software
                 are explored. The elements of control as they relate to
                 communications are defined, and the implementation of
                 these elements in solving the fundamental problems and
                 achieving optimal control during a software development
                 life cycle is explained. Control is defined as a
                 vehicle for communicating changes to established,
                 agreed-upon baseline points, made up of documents and
                 subsequent computer programs. By communicating change
                 to those involved or affected, and obtaining agreement
                 of the change, one achieves a degree of control that
                 does not inhibit software engineering innovation or
                 progress, but helps maintain the project's prime
                 objectives to deliver maintainable, error-free software
                 to the ultimate user.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gross:1981:PCV,
  author =       "Peter Gross",
  title =        "Producers and consumers views of software quality
                 (Panel Session)",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "79--79",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807910",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "At this very ACM workshop/symposium indicates,
                 software quality is of great concern to both producers
                 and users of software. It should be obvious to those
                 who have attended the earlier sessions today and to
                 those who will attend the sessions tomorrow that
                 quality is something that cannot be tested into a
                 system or added to a system. It must be integral from
                 the start of the definition of the system's
                 requirements through each phase of analysis, design,
                 implementation, integration, testing, and installation.
                 Software quality implies an engineering type approach
                 to the development of software. It implies the use of a
                 disciplined development environment, and the use of
                 tools and techniques to provide assurances throughout
                 the software development process that both the software
                 and its baseline specifications are complete,
                 consistent, and traceable from one to another.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Henry:1981:RAT,
  author =       "Sallie Henry and Dennis Kafura and Kathy Harris",
  title =        "On the relationships among three software metrics",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "81--88",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807911",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Automatable metrics of software quality appear to have
                 numerous advantages in the design, construction and
                 maintenance of software systems. While numerous such
                 metrics have been defined, and several of them have
                 been validated on actual systems, significant work
                 remains to be done to establish the relationships among
                 these metrics. This paper reports the results of
                 correlation studies made among three complexity metrics
                 which were applied to the same software system. The
                 three complexity metrics used were Halstead's effort,
                 McCabe's cyclomatic complexity and Henry and Kafura's
                 information flow complexity. The common software system
                 was the UNIX operating system. The primary result of
                 this study is that Halstead's and McCabe's metrics are
                 highly correlated while the information flow metric
                 appears to be an independent measure of complexity.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Szulewski:1981:MSS,
  author =       "Paul A. Szulewski and Mark H. Whitworth and Philip
                 Buchan and J. Barton DeWolf",
  title =        "The measurement of software science parameters in
                 software designs",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "89--94",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807912",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Metrics of software quality have historically focused
                 on code quality despite the importance of early and
                 continuous quality evaluation in a software development
                 effort. While software science metrics have been used
                 to measure the psychological complexity of computer
                 programs as well as other quality related aspects of
                 algorithm construction, techniques to measure software
                 design quality have not been adequately addressed. In
                 this paper, software design quality is emphasized. A
                 general formalism for expressing software designs is
                 presented, and a technique for identifying and counting
                 software science parameters in design media is
                 proposed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Basili:1981:ECS,
  author =       "Victor R. Basili and Tsai-Yun Phillips",
  title =        "Evaluating and comparing software metrics in the
                 software engineering laboratory",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "95--106",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807913",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "There has appeared in the literature a great number of
                 metrics that attempt to measure the effort or
                 complexity in developing and understanding software.
                 There have also been several attempts to independently
                 validate these measures on data from different
                 organizations gathered by different people. These
                 metrics have many purposes. They can be used to
                 evaluate the software development process or the
                 software product. They can be used to estimate the cost
                 and quality of the product. They can also be used
                 during development and evolution of the software to
                 monitor the stability and quality of the product. Among
                 the most popular metrics have been the software science
                 metrics of Halstead, and the cyclomatic complexity
                 metric of McCabe. One question is whether these metrics
                 actually measure such things as effort and complexity.
                 One measure of effort may be the time required to
                 produce a product. One measure of complexity might be
                 the number of errors made during the development of a
                 product. A second question is how these metrics compare
                 with standard size measures, such as the number of
                 source lines or the number of executable statements,
                 i.e., do they do a better job of predicting the effort
                 or the number of errors? Lastly, how do these metrics
                 relate to each other?",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ronback:1981:TMS,
  author =       "James Ronback",
  title =        "Test metrics for software quality",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "107--107",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807914",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper discusses Bell Northern Research's
                 experience in utilizing an extended set of test metrics
                 for assuring the quality of software. The theory and
                 use of branch and path class coverage is discussed and
                 the reaction of users is described. This paper also
                 discusses the effect of using co-resident inspection
                 procedures in achieving cost-effective testing for a
                 high degree of test coverage.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Benson:1981:AST,
  author =       "J. P. Benson",
  title =        "Adaptive search techniques applied to software
                 testing",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "109--116",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807915",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An experiment was performed in which executable
                 assertions were used in conjunction with search
                 techniques in order to test a computer program
                 automatically. The program chosen for the experiment
                 computes a position on an orbit from the description of
                 the orbit and the desired point. Errors were inserted
                 in the program randomly using an error generation
                 method based on published data defining common error
                 types. Assertions were written for the program and it was
                 tested using two different techniques. The first
                 divided up the range of the input variables and
                 selected test cases from within the sub-ranges. In this
                 way a ``grid'' of test values was constructed over the
                 program's input space. The second used a search
                 algorithm from optimization theory. This entailed using
                 the assertions to define an error function and then
                 maximizing its value. The program was then tested by
                 varying all of them. The results indicate that this
                 search testing technique was as effective as the grid
                 testing technique in locating errors and was more
                 efficient. In addition, the search testing technique
                 located critical input values which helped in writing
                 correct assertions.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Paige:1981:DST,
  author =       "Michael Paige",
  title =        "Data space testing",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "117--127",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807916",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A complete software testing process must concentrate
                 on examination of the software characteristics as they
                 may impact reliability. Software testing has largely
                 been concerned with structural tests, that is, tests of
                 program logic flow. In this paper, a companion software
                 test technique for the program data called data space
                 testing is described. An approach to data space
                 analysis is introduced with an associated notation. The
                 concept is to identify the sensitivity of the software
                 to a change in a specific data item. The collective
                 information on the sensitivity of the program to all
                 data items is used as a basis for test selection and
                 generation of input values.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Goel:1981:OTP,
  author =       "Amrit L. Goel",
  title =        "Optimal testing policies for software systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "129--130",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807918",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An important problem of practical concern is to
                 determine how much testing should be done before a
                 system is considered ready for release. This decision,
                 of course, depends on the model for the software
                 failure phenomenon and the criterion used for
                 evaluating system readiness. In this paper, we first
                 develop a cost model based on the time dependent
                 failure rate function of Goel and Okumoto. Next, we
                 derive policies that yield the optimal values of the
                 level of test effort (b*) and software release time
                 (T*). The sensitivity of the optimal solution is also
                 numerically evaluated.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Littlewood:1981:BDD,
  author =       "B. Littlewood",
  title =        "A {Bayesian} differential debugging model for software
                 reliability",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "129--130",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807919",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An assumption commonly made in early models of
                 software reliability is that the failure rate of a
                 program is a constant multiple of the number of bugs
                 remaining. This implies that all bugs have the same
                 effect upon the overall failure rate. The assumption is
                 challenged and an alternative proposed. The suggested
                 model results in earlier bug-fixes having a greater
                 effect than later ones (the worst bugs show themselves
                 earlier and so are fixed earlier), and the DFR property
                 between bug-fixes (confidence in programs increases
                 during periods of failure-free operation, as well as at
                 bug-fixes). The model shows a high degree of
                 mathematical tractability, and allows a range of
                 reliability measures to be calculated exactly.
                 Predictions of total execution time to achieve a
                 target reliability are obtained.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Musa:1981:SRMa,
  author =       "J. D. Musa and A. Iannino",
  title =        "Software reliability modeling accounting for program
                 size variation due to integration or design changes",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "129--130",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807920",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Estimation of software reliability quantities has
                 traditionally been on stable systems; i.e., systems
                 that are completely integrated and are not undergoing
                 design changes. Also, it is assumed that test results
                 are completely inspected for failures. This paper
                 describes a method for relaxing the foregoing
                 conditions by adjusting the lengths of the intervals
                 between failures experienced in tests as compensation.
                 The resulting set of failure intervals represents the
                 set that would have occurred for a stable system in its
                 final configuration with complete inspection. The
                 failure intervals are then processed as they would be
                 for a complete system. The approach is developed for
                 the execution time theory of software reliability, but
                 the concepts could be applied to many other models. The
                 estimation of quantities of interest to the software
                 manager is illustrated.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Musa:1981:SRMb,
  author =       "John D. Musa",
  title =        "Software reliability measurement session",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "129--130",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807917",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Many people think of reliability as a devoutly wished
                 for but seldom present attribute of a program. This
                 leads to the idea that one should make a program as
                 reliable as one possibly can. Unfortunately, in the
                 real world software reliability is usually achieved at
                 the expense of some other characteristic of the product
                 such as program size, run or response time,
                 maintainability, etc. or the process of producing the
                 product such as cost, resource requirements,
                 scheduling, etc. One wishes to make explicit trade-offs
                 among the software product and process rather than let
                 them happen by chance. Such trade-offs imply the need
                 for measurement. Because of mounting development and
                 operational costs, pressures for obtaining better ways
                 of measuring reliability have been mounting. This
                 session deals with this crucial area.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Goel:1981:WST,
  author =       "Amrit L. Goel and Kazuhira Okumoto",
  title =        "When to stop testing and start using software?",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "131--138",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807921",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "During the last decade, numerous studies have been
                 undertaken to quantify the failure process of large
                 scale software systems. (see for example, references
                 1-12.) An important objective of these studies is to
                 predict software performance and use the information
                 for decision making. An important decision of practical
                 concern is the determination of the amount of time that
                 should be spent in testing. This decision of course
                 will depend on the model used for describing the
                 failure phenomenon and the criterion used for
                 determining system readiness. In this paper we present
                 a cost model based on the time dependent fault
                 detection rate model of Goel and Okumoto (4,5) and
                 describe a policy that yields the optimal value of test
                 time T.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Littlewood:1981:SRG,
  author =       "B. Littlewood",
  title =        "Stochastic reliability growth: a model with
                 applications to computer software faults and hardware
                 design faults",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "139--152",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807922",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An assumption commonly made in early models of
                 software reliability is that the failure rate of a
                 program is a constant multiple of the number of faults
                 remaining. This implies that all faults have the same
                 effect upon the overall failure rate. The assumption is
                 challenged and an alternative proposed. The suggested
                 model results in earlier fault-fixes having a greater
                 effect than later ones (the worst faults show
                 themselves earlier and so are fixed earlier), and the
                 DFR property between fault-fixes (confidence in
                 programs increases during periods of failure-free
                 operations, as well as at fault-fixes). The model shows
                 a high degree of mathematical tractability, and allows
                 a range of reliability measures to be calculated
                 exactly. Predictions of total execution time to achieve
                 a target reliability, and total number of fault-fixes
                 to target reliability, are obtained. It is suggested
                 that the model might also find applications in those
                 hardware reliability growth situations where design
                 errors are being eliminated.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Decreasing failure rate; Design debugging; Design
                 errors; Pareto Distribution; Probability distribution
                 mixture; Programming debugging modelling; Reliability
                 growth; Software errors; Software failure rate;
                 Software faults; Software mttf; Software reliability",
}

@Article{Ottenstein:1981:SDS,
  author =       "Linda M. Ottenstein",
  title =        "Software defects --- a software science perspective",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "153--155",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807923",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper gives a model for computing the programming
                 time. The results of tests with programs in APL, BASIC,
                 and FORTRAN are also given and discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ottenstein:1981:PNE,
  author =       "Linda Ottenstein",
  title =        "Predicting numbers of errors using software science",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "157--167",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807924",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An earlier paper presented a model based on software
                 science metrics to give a quantitative estimate of the
                 number of bugs in a programming project at the time
                 validation of the project begins. In this paper, we
                 report the results from an attempt to expand the model
                 to estimate the total number of bugs to expect during
                 the total project development. This new hypothesis has
                 been tested using the data currently available in the
                 literature along with data from student projects. The
                 model fits the published data reasonably well; however,
                 the results obtained using the student data are not
                 conclusive.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Schneider:1981:SEE,
  author =       "Victor Schneider",
  title =        "Some experimental estimators for developmental and
                 delivered errors in software development projects",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "169--172",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807925",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Experimental estimators are presented relating the
                 expected number of software problem reports (B) in a
                 software development project to the overall reported
                 professional effort (E) in ``man months'', the number
                 of subprograms (n), and the overall count of thousands
                 of coded source statements of software (S). [equation]
                 These
                 estimators are shown to be consistent with data
                 obtained from the Air Force's Rome Air Development
                 Center, the Naval Research Laboratory, and Japan's
                 Fujitsu Corporation. Although the results are
                 promising, more data is needed to support the validity
                 of these estimators.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sneed:1981:SSA,
  author =       "H. Sneed",
  title =        "{SOFTDOC} --- {A} system for automated software static
                 analysis and documentation",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "173--177",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010627.807926",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The code itself is useless without adequate
                 documentation. Besides that, it is almost impossible to
                 validate and verify code unless it is properly
                 documented. Yet most of the attention in the past has
                 been devoted to producing code and little to producing
                 the documentation, although it is obvious that it is
                 necessary both for testing and maintaining the software
                 product. Software documentation can be classified
                 according to its usage. Thus, there is functional
                 documentation for describing what a system does and
                 what it is used for, and technical documentation for
                 describing how the software is constructed and how it
                 performs its functions. The former is directed toward
                 the user, the latter toward the tester and maintainer.
                 The two are, however, highly interrelated. Since the
                 programmer seldom writes the user documentation, it is
                 necessary for those who describe what the system does
                 to know how it does it. Accurate technical
                 documentation is a prerequisite for producing accurate
                 user documentation. Finally, it serves yet another
                 purpose. Without it, it is not possible to control the
                 quality of the software. Software Quality Control
                 presupposes a full and up-to-date technical description
                 in order to assess the characteristics of the system
                 such as modularity, portability, reliability, etc.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Crowley:1981:ADP,
  author =       "John D. Crowley",
  title =        "The application development process: {What}'s wrong
                 with it?",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "179--187",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807927",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper will examine the process used in the
                 development of computer applications. The claim is made
                 that the current methodology has serious deficiencies,
                 but that a software development approach is becoming
                 available to help address these problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bailey:1981:SSU,
  author =       "C. T. Bailey and W. L. Dingee",
  title =        "A software study using {Halstead} metrics",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "1",
  pages =        "189--197",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800003.807928",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:03 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes an application of Maurice
                 Halstead's software theory to a real time switching
                 system. The Halstead metrics and the software tool
                 developed for computing them are discussed. Analysis of
                 the metric data indicates that the level of the
                 switching language was not constant across algorithms
                 and that software error data was not a linear function
                 of volume.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
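
The Halstead measures used in studies like this one are simple functions of operator and operand counts. A minimal Python sketch of the standard definitions follows; the counts in the example are invented for illustration, not taken from the paper.

import math

def halstead(n1, n2, N1, N2):
    """Standard Halstead software-science measures.
    n1, n2: distinct operators / operands; N1, N2: total occurrences."""
    n = n1 + n2                                       # vocabulary
    N = N1 + N2                                       # observed length
    N_hat = n1 * math.log2(n1) + n2 * math.log2(n2)   # estimated length
    V = N * math.log2(n)                              # volume
    L = (2.0 / n1) * (n2 / N2)                        # estimated level
    E = V / L                                         # effort
    return {"n": n, "N": N, "N_hat": N_hat, "V": V, "L": L, "E": E}

# Invented counts for a small routine:
print(halstead(n1=10, n2=15, N1=40, N2=35))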

@Article{Esposito:1981:WCT,
  author =       "A. Esposito and A. Mazzeo and P. Costa",
  title =        "Workload characterization for trend analysis",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "2",
  pages =        "5--15",
  month =        "Summer",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041799.1041800",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The methodology of analysis proposed in this paper
                 aims at predicting the workload of a computer. This
                 methodology consists of applying an algorithm of
                 clustering to the workload, its jobs being identified
                 by a pair $ (X, P) $, where $X$ is the resource-vector
                 of the job and $P$ stands for the priority given to the
                 job by the user. The clusters thus obtained are then
                 associated with the $ a_i$ activities developed in the
                 system and determine the influence of each $ a_i$ on
                 the overall workload. By repeating this operation at
                 different times, either the periodicity or the
                 monotonic changes that may occur in each activity are
                 determined. This makes it possible to predict the
                 evolution of the overall workload and consequently to
                 evaluate changes to be carried out in the system. The
                 above methodology is applied to a specific case and is
                 illustrated in its various phases. The results obtained
                 have validated the method. The study is still in
                 progress, with continuous periodic observations to
                 update the data.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
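
The abstract does not name the clustering algorithm applied to the (X, P) job descriptions, so the sketch below uses plain k-means over invented job records merely to illustrate the general step of grouping jobs into workload activities; every column and value is an assumption.

import numpy as np

rng = np.random.default_rng(0)

# Invented job records: resource vector X = (CPU s, I/O count, memory KB)
# plus the user-assigned priority P, mirroring the (X, P) pairs above.
jobs = np.vstack([
    rng.normal([10, 200, 128, 1], 2, size=(50, 4)),    # interactive-like
    rng.normal([300, 50, 512, 3], 20, size=(50, 4)),   # batch-like
])

def kmeans(data, k, iters=20):
    """Plain k-means, purely illustrative of the clustering step."""
    centers = data[rng.choice(len(data), k, replace=False)]
    for _ in range(iters):
        labels = ((data[:, None] - centers) ** 2).sum(-1).argmin(1)
        centers = np.array([data[labels == j].mean(0) for j in range(k)])
    return labels, centers

labels, centers = kmeans(jobs, k=2)
print(centers)        # one centroid per workload activity a_i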

@Article{Musa:1981:SRMc,
  author =       "J. D. Musa and A. Iannino",
  title =        "Software reliability modeling: accounting for program
                 size variation due to integration or design changes",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "2",
  pages =        "16--25",
  month =        "Summer",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041799.1041801",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Estimation of software reliability quantities has
                 traditionally been based on stable programs; i.e.,
                 programs that are completely integrated and are not
                 undergoing design changes. Also, it is ordinarily
                 assumed that all code is being executed at one time or
                 another and that test or operational results are being
                 completely inspected for failures. This paper describes
                 a method for relaxing the foregoing conditions by
                 adjusting the lengths of the intervals between failures
                 experienced as compensation. The resulting set of
                 failure intervals represents the set that would have
                 occurred for a completely inspected program that was at
                 all times in its final configuration. The failure
                 intervals are then processed as they would be for a
                 stable program. The approach is developed for the
                 execution time theory of software reliability, but the
                 concepts could be applied to many other models as well.
                 Many definitions are given to describe program size
                 variation and associated phenomena. Attention is
                 focused on the special case of sequential integration
                 and pure growth. The adjustment method is described and
                 its benefits in improving the estimation of quantities
                 of interest to the software manager are illustrated.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Comer:1981:CTD,
  author =       "J. R. Comer and J. R. Rinewalt and M. M. Tanik",
  title =        "A comparison of two different program complexity
                 measures",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "2",
  pages =        "26--28",
  month =        "Summer",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041799.1041802",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In recent years, a number of program complexity
                 metrics have been developed to measure various
                 characteristics of computer programs [1, 3]. Included
                 among these metrics are Zolnowski's composite measure
                 of program complexity [4, 5] and McCabe's cyclomatic
                 measure of program complexity [2]. The present paper
                 examines these two metrics and attempts to measure
                 their correlation with a third metric assigned by the
                 program's author. This metric has been called the
                 psychological complexity or the intuitive complexity of
                 a program.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
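
McCabe's cyclomatic measure mentioned above is V(G) = E - N + 2P for a control-flow graph with E edges, N nodes, and P connected components. A minimal sketch; the example graph is invented.

def cyclomatic(edges, num_nodes, num_components=1):
    """McCabe's cyclomatic number V(G) = E - N + 2P."""
    return len(edges) - num_nodes + 2 * num_components

# Invented control-flow graph: one if/else plus one loop back-edge.
edges = [(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 1), (4, 5)]
print(cyclomatic(edges, num_nodes=6))   # 7 - 6 + 2 = 3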

@Article{Abrams:1981:NNM,
  author =       "Marshall D. Abrams and Dorothy C. Neiman",
  title =        "{NBS} network measurement methodology applied to
                 synchronous communications",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "2",
  pages =        "29--36",
  month =        "Summer",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041799.1041803",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper focuses on the application of the NBS
                 Network Measurement Instrument (NMI) to synchronous
                 data communication. The suitability of the underlying
                 Stimulus --- Acknowledgement --- Response (SAR) model
                 to support the implementation of this methodology,
                 permitting quantitative evaluation of the interactive
                 teleprocessing service delivered to the user, is
                 described. The logic necessary to interpret SAR
                 components and boundaries depends on character time
                 sequence for asynchronous data communications traffic
                 but entails protocol decomposition and content analysis
                 for character synchronous data traffic. The
                 decomposition and analysis rules necessary to evaluate
                 synchronous communications are discussed and the level
                 of protocol violation detection which results as a
                 byproduct is cited. Extensions to the utility of the
                 Network Measurement Instrument (NMI), deriving from
                 additional workload profiling measures desirable for
                 character synchronous communications, are also
                 presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "data communications; protocol validation; synchronous;
                 teleprocessing service evaluation",
}

@Article{Larsen:1981:CEL,
  author =       "R. L. Larsen and J. R. Agre and A. K. Agrawala",
  title =        "A comparative evaluation of local area communication
                 technology",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "2",
  pages =        "37--47",
  month =        "Summer",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041799.1041804",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The builder of a local area network is immediately
                 confronted with the selection of a communications
                 architecture to interconnect the elements (hosts and
                 terminals) of the network. This choice must often be
                 made in the presence of great uncertainty regarding the
                 available alternatives and their capabilities, and a
                 dearth of comparative information. This was the
                 situation confronting NASA when it began seriously considering
                 local area networks as an architecture for mission
                 support operations. As a result, a comparative study
                 was performed in which alternative communication
                 architectures were evaluated under similar operating
                 conditions and system configurations. Considered were:
                 (1) the ring, (2) the cable-bus, (3) a
                 circuit-switching system, and (4) a shared memory
                 system. The principal performance criterion used was
                 the mean time required to move a message from one host
                 processor to another host processor. Local operations
                 within each host, such as interrupt service time, were
                 considered to be part of this overall time. The
                 performance of each alternative was evaluated through
                 simulation models and is summarized in this paper.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hughes:1981:HPT,
  author =       "Herman D. Hughes",
  title =        "A highly parameterized tool for studying performance
                 of computer systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "2",
  pages =        "48--65",
  month =        "Summer",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041799.1041805",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A highly parameterized simulation model is described
                 which allows experiments to be performed for computer
                 performance evaluation studies. The results of these
                 experiments can be used to evaluate the effect of
                 changing the hardware configuration, the workload, the
                 scheduling policy, the multiprogramming level, etc. The
                 model is constructed to function either as a batch or
                 time-sharing system, or as a combination of both. This
                 simulation model also has the potential of providing
                 dynamic feedback for the scheduler. A discussion of the
                 design, implementation, and use of the model is
                 presented. Examples are provided to illustrate some
                 possible uses of the model and verifications of the
                 results obtained from the model.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "cumulative distribution function; events; hardware
                 configuration; model validation; queue; scheduling
                 policies; simulation model; system performance;
                 workloads",
}

@Article{Spiegel:1981:RPP,
  author =       "Mitchell G. Spiegel",
  title =        "{RTE}'s: past is prologue",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "2",
  pages =        "66--73",
  month =        "Summer",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041799.1041806",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:56:45 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper surveys the evolution of Remote Terminal
                 Emulators (RTEs). Major developments in RTE technology
                 are separated into three `generations' of products.
                 Each generation's unique applications and features are
                 highlighted. Recent developments are noted and a
                 prediction of future use for RTEs is provided.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Browne:1981:DSP,
  author =       "J. C. Browne",
  title =        "Designing systems for performance",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "1--1",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805467",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Real-time systems and systems to interface human work
                 environments will dominate the growth of computer
                 applications over the next decade. These systems must
                 execute their functions with the timeliness and
                 responsiveness required in these environments. The
                 design, development and testing of such systems must
                 guarantee performance as well as functionality and
                 reliability. There is not yet in place a technology to
                 support this requirement for engineering of
                 performance. The research and development community in
                 performance has focused primarily on analysis and
                 deduction rather than the performance arena. This talk
                 will define and discuss the tasks of engineering
                 performance into software systems and describe the
                 recent progress towards this goal.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Reiner:1981:MAP,
  author =       "David Reiner and Tad Pinkerton",
  title =        "A method for adaptive performance improvement of
                 operating systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "2--10",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805468",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents a method for dynamic modification
                 of operating system control parameters to improve
                 system performance. Improved parameter settings are
                 learned by experimenting on the system. The experiments
                 compare the performance of alternative parameter
                 settings in each region of a partitioned
                 load-performance space associated with the system. The
                 results are used to modify important control parameters
                 periodically, responding to fluctuations in system load
                 and performance. The method can be used to implement
                 adaptive tuning, to choose between alternative
                 algorithms and policies, or to select the best fixed
                 settings for parameters which are not modified. The
                 method was validated and proved practical by an
                 investigation of two parameters governing core quantum
                 allocation on a Sperry Univac 1100 system. This
                 experiment yielded significant results, which are
                 presented and discussed. Directions for future research
                 include automating the method, determining the effect
                 of simultaneous modifications to unrelated control
                 parameters, and detecting dominant control
                 parameters.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
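
As a toy illustration of the region-based scheme described above (not the authors' implementation; every name and value here is invented): keep per-region statistics for each candidate parameter setting, explore occasionally, and periodically adopt the best-observed setting in each region of the load space.

import random
from collections import defaultdict

REGIONS = ["light", "medium", "heavy"]   # partitioned load space
SETTINGS = [4, 8, 16]                    # candidate quantum values

def measure(region, setting):
    """Stand-in for one measurement interval on the real system."""
    best = {"light": 4, "medium": 8, "heavy": 16}[region]
    return -abs(setting - best) + random.gauss(0, 0.5)

samples = defaultdict(list)              # (region, setting) -> scores
current = {r: random.choice(SETTINGS) for r in REGIONS}

for interval in range(300):
    region = random.choice(REGIONS)      # load region observed now
    setting = (random.choice(SETTINGS)   # explore occasionally ...
               if random.random() < 0.2 else current[region])
    samples[(region, setting)].append(measure(region, setting))
    if interval % 50 == 49:              # ... and periodically adopt
        for r in REGIONS:                # the best-observed setting
            current[r] = max(SETTINGS, key=lambda s: sum(
                samples[(r, s)]) / max(1, len(samples[(r, s)])))

print(current)   # tends toward the per-region optimum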

@Article{Wang:1981:VVT,
  author =       "Y. T. Wang",
  title =        "On the {VAX\slash VMS} time-critical process
                 scheduling",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "11--18",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805469",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The VAX/VMS process schedule is briefly described. A
                 simple priority-driven round-robin queuing model is
                 then constructed to analyze the behavior of the
                 time-critical processes of VAX/VMS under such a
                 schedule. Mean and variance of the conditional response
                 time of a process at a given priority are derived,
                 conditioned on the amount of service time required by
                 that process. Numerical results are given with
                 comparisons to the ordinary priority queuing systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Soderlund:1981:ECP,
  author =       "Lars S{\"o}derlund",
  title =        "Evaluation of concurrent physical database
                 reorganization through simulation modeling",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "19--32",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805470",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The performance of a database system commonly
                 deteriorates due to degradation of the database's
                 physical data structure. The structure degradation is a
                 consequence of the normal operations of a general
                 database management system. When system performance has
                 degraded below acceptable limits the database must be
                 reorganized. In conventional, periodic reorganization
                 the database, or part of it, is taken off line while
                 the data structure is being reorganized. This paper
                 presents results from a study where it is shown that
                 concurrent reorganization, i.e., a continuous
                 reorganization of the physical data structure while
                 application processes have full access to the database,
                 is an attractive alternative to conventional
                 reorganization. The paper also presents a solution to a
                 methodological problem concerning the simulation of a
                 system which has activities with widely varying
                 durations.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lazowska:1981:AMD,
  author =       "Edward D. Lazowska and John Zahorjan",
  title =        "Analytic modelling of disk {I/O} subsystems: a
                 tutorial",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "33--35",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805471",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This is a summary of a tutorial presented during the
                 conference, discussing a number of approaches to
                 representing disk I/O subsystems in analytic models of
                 computer systems. As in any analytic modelling study,
                 the fundamental objective in considering an I/O
                 subsystem is to determine which devices should be
                 represented in the model, and what their loadings
                 should be. The device loadings represent the service
                 required by jobs, and are the basic parameters needed
                 by the computational algorithm which calculates
                 performance measures for the model. To set these
                 parameters, knowledge of service times at the various
                 devices in the I/O subsystem is required. The tutorial
                 begins by distinguishing analytic modelling from
                 alternative approaches, by identifying the parameter
                 values that are required for an analytic modelling
                 study, and by explaining the role of the computational
                 algorithm that is employed (Denning \& Buzen [1978]
                 provide a good, although lengthy, summary). We then
                 consider a sequence of models of increasingly complex
                 I/O subsystems. Next we discuss I/O subsystems with
                 rotational position sensing. We then discuss approaches
                 to modelling shared DASD, emphasizing hierarchical
                 techniques in which high-level models of each system can
                 be analyzed in isolation. We also mention recent
                 techniques for modelling complex I/O subsystems
                 involving multipathing. Finally, we discuss the
                 analysis of I/O subsystems based on broadcast channels
                 such as Ethernet.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
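
To give a flavor of the parameterization step the tutorial starts from: a simple disk's mean service time can be assembled from seek, rotational latency, and transfer components, and the device loading is then the demand D = V * S. The numbers below are illustrative assumptions, not values from the tutorial.

# Invented single-disk parameters:
seek = 0.015                       # mean seek time, seconds
latency = 0.5 * 60.0 / 3600.0      # half a rotation at 3600 rpm
transfer = 4.0 / 1000.0            # 4 KB block at 1000 KB/s

S = seek + latency + transfer      # mean service time per disk access
V = 20.0                           # disk visits per job
D = V * S                          # service demand: the device loading
print(S, D)                        # inputs to the queueing model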

@Article{Dowdy:1981:MUS,
  author =       "Lawrence W. Dowdy and Hans J. Breitenlohner",
  title =        "A model of {Univac 1100\slash 42} swapping",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "36--47",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805472",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The performance of a computer system depends upon the
                 efficiency of its swapping mechanisms. The swapping
                 efficiency is a complex function of many variables. The
                 degree of multiprogramming, the relative loading on the
                 swapping devices, and the speed of the swapping devices
                 are all interdependent variables that affect swapping
                 performance. In this paper, a model of swapping
                 behavior is given. The interdependencies between the
                 degree of multiprogramming, the swapping devices'
                 loadings, and the swapping devices' speeds are modeled
                 using an iterative scheme. The validation of a model is
                 its predictive capability. The given swapping model was
                 applied to a Univac 1100/42 system to predict the
                 effect of moving the swapping activity from drums to
                 discs. When the swapping activity was actually moved,
                 throughput increased by 20\%. The model accurately
                 predicted this improvement. Subtopics discussed
                 include: (1) the modeling of blocked and overlapped
                 disc seek activity, (2) the usefulness of empirical
                 formulae, and (3) the calibration of unmeasurable
                 parameters. Extensions and further applications of the
                 model are given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Closed queuing networks; Model validation; Parameter
                 interdependencies; Performance prediction; Swapping",
}

@Article{Turner:1981:SFP,
  author =       "Rollins Turner and Henry Levy",
  title =        "Segmented {FIFO} page replacement",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "48--51",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805473",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A fixed-space page replacement algorithm is presented.
                 A variant of FIFO management using a secondary FIFO
                 buffer, this algorithm provides a family of performance
                 curves lying between FIFO and LRU. The implementation
                 is simple, requires no periodic scanning, and uses no
                 special hardware support. Simulations are used to
                 determine the performance of the algorithm for several
                 memory reference traces. Both the fault rates and
                 overhead cost are examined.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "FIFO page replacement; LRU page replacement; Page
                 replacement algorithms; Performance evaluation",
}
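
A minimal simulation sketch of the algorithm as the abstract describes it: a primary FIFO buffer backed by a secondary FIFO buffer, with a hit in the secondary buffer counted as a cheap reclaim rather than a real fault. Buffer sizes and the trace are invented, and details such as modified-page handling are omitted.

from collections import deque

def segmented_fifo(trace, primary_size, secondary_size):
    """Primary FIFO buffer backed by a secondary FIFO buffer.
    secondary_size = 0 degenerates to pure FIFO; growing the secondary
    moves the fault-rate curve toward LRU, as the paper reports."""
    primary, secondary = deque(), deque()
    faults = reclaims = 0
    for page in trace:
        if page in primary:
            continue                      # FIFO: no reordering on a hit
        if page in secondary:
            secondary.remove(page)        # still resident: cheap reclaim
            reclaims += 1
        else:
            faults += 1                   # true page fault
        primary.append(page)
        if len(primary) > primary_size:
            secondary.append(primary.popleft())
            if len(secondary) > secondary_size:
                secondary.popleft()       # final eviction
    return faults, reclaims

print(segmented_fifo([1, 2, 3, 4, 1, 2, 5, 1, 2, 3, 4, 5], 3, 1))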

@Article{Ferrari:1981:GMW,
  author =       "Domenico Ferrari",
  title =        "A generative model of working set dynamics",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "52--57",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805474",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An algorithm for generating a page reference string
                 which exhibits a given working set size behavior in the
                 time domain is presented, and the possible applications
                 of such a string are discussed. The correctness of the
                 algorithm is proved, and its computational complexity
                 found to be linear in the length of the string. A
                 program implementing the algorithm, which is performed
                 in one pass and requires very little space, is briefly
                 described, and some experimental results are given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
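
The generation algorithm itself is not reproduced in the abstract, but the working-set-size behavior it targets is the standard windowed count of distinct pages. A small sketch of that measure, which a generated reference string would be checked against:

def working_set_sizes(trace, T):
    """w(t, T): distinct pages among the last T references ending at t
    (Denning's working-set size)."""
    return [len(set(trace[max(0, t - T + 1):t + 1]))
            for t in range(len(trace))]

print(working_set_sizes([1, 2, 1, 3, 4, 4, 2, 1, 5, 5], T=4))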

@Article{Zahorjan:1981:BJB,
  author =       "J. Zahorjan and K. C. Sevcik and D. L. Eager and B. I.
                 Galler",
  title =        "Balanced job bound analysis of queueing networks",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "58--58",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805475",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Applications of queueing network models to computer
                 system performance prediction typically involve the
                 computation of their equilibrium solution. When
                 numerous alternative systems are to be examined and the
                 numbers of devices and customers are large, however,
                 the expense of computing the exact solutions may not be
                 warranted by the accuracy required. In such situations,
                 it is desirable to be able to obtain bounds on the
                 system solution with very little computation.
                 Asymptotic bound analysis (ABA) is one technique for
                 obtaining such bounds. In this paper, we introduce
                 another bounding technique, called balanced job bounds
                 (BJB), which is based on the analysis of systems in
                 which all devices are equally utilized. These bounds
                 are tighter than ABA bounds in many cases, but they are
                 based on more restrictive assumptions (namely, those
                 that lead to separable queueing network models).",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
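
For a closed batch network with K devices, total demand D, maximum demand Dmax, and average demand Davg = D/K, the balanced job bounds take the form N/(D + (N-1)Dmax) <= X(N) <= min(1/Dmax, N/(D + (N-1)Davg)), which tightens the ABA bounds 1/D <= X(N) <= min(1/Dmax, N/D). A small sketch; the demand values are assumed.

def throughput_bounds(demands, N):
    """ABA and balanced job bounds on X(N) for a closed batch network;
    demands[k] is the total service demand at device k."""
    D, K = sum(demands), len(demands)
    Dmax, Davg = max(demands), D / K
    aba = (1 / D, min(1 / Dmax, N / D))
    bjb = (N / (D + (N - 1) * Dmax),
           min(1 / Dmax, N / (D + (N - 1) * Davg)))
    return aba, bjb

demands = [0.60, 0.30, 0.10]        # invented CPU and two-disk demands
for N in (1, 4, 16):
    print(N, throughput_bounds(demands, N))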

@Article{Neuse:1981:SHA,
  author =       "D. Neuse and K. Chandy",
  title =        "{SCAT}: a heuristic algorithm for queueing network
                 models of computing systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "59--79",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805476",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents a new algorithm for the
                 approximate analysis of closed product-form queueing
                 networks with fixed-rate, delay (infinite-server), and
                 load-dependent queues. This algorithm has the accuracy,
                 speed, small memory requirements, and simplicity
                 necessary for inclusion in a general network analysis
                 package. The algorithm allows networks with large
                 numbers of queues, job classes, and populations to be
                 analyzed interactively even on microcomputers with very
                 limited memory.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Analytic models; Approximations; Iterative algorithms;
                 Load-dependent queues; Performance analysis;
                 Product-form; Queueing networks",
}
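
The published SCAT algorithm is not reproduced here, but it belongs to the family of fixed-point approximate MVA techniques. The sketch below shows a Schweitzer-style iteration for fixed-rate queues only, which illustrates why such approximations need so little time and memory compared with the exact recursion.

def schweitzer_mva(demands, N, tol=1e-6):
    """Schweitzer-style fixed-point approximate MVA for fixed-rate
    queues: estimate Q_k(N-1) by (N-1)/N * Q_k(N) and iterate.  SCAT
    refines this kind of scheme; this is only the core idea."""
    Q = [N / len(demands)] * len(demands)    # initial guess
    while True:
        R = [d * (1 + (N - 1) / N * q) for d, q in zip(demands, Q)]
        X = N / sum(R)
        Q_new = [X * r for r in R]
        if max(abs(a - b) for a, b in zip(Q, Q_new)) < tol:
            return X, Q_new
        Q = Q_new

print(schweitzer_mva([0.60, 0.30, 0.10], N=8))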

@Article{Zahorjan:1981:SSQ,
  author =       "John Zahorjan and Eugene Wong",
  title =        "The solution of separable queueing network models
                 using mean value analysis",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "80--85",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805477",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Because it is more intuitively understandable than the
                 previously existing convolution algorithms, Mean Value
                 Analysis (MVA) has gained great popularity as an exact
                 solution technique for separable queueing networks.
                 However, the derivations of MVA presented to date apply
                 only to closed queueing network models. Additionally,
                 the problem of the storage requirement of MVA has not
                 been dealt with satisfactorily. In this paper we
                 address both these problems, presenting MVA solutions
                 for open and mixed load-independent networks, and a
                 storage maintenance technique that we postulate is the
                 minimum possible for any ``reasonable'' MVA technique.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
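
The closed-network recursion that MVA is built on is compact: R_k(n) = D_k(1 + Q_k(n-1)), X(n) = n/(Z + sum over k of R_k(n)), Q_k(n) = X(n) R_k(n). A single-class sketch with fixed-rate centres and an optional think time; demand values are assumed.

def exact_mva(demands, N, Z=0.0):
    """Exact single-class MVA for a closed separable network of
    fixed-rate centres plus optional think time Z."""
    Q = [0.0] * len(demands)
    for n in range(1, N + 1):
        R = [d * (1 + q) for d, q in zip(demands, Q)]   # residence times
        X = n / (Z + sum(R))                            # throughput
        Q = [X * r for r in R]                          # queue lengths
    return X, Q

print(exact_mva([0.60, 0.30, 0.10], N=8))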

@Article{Thomasian:1981:ASQ,
  author =       "Alexander Thomasian and Behzad Nadji",
  title =        "Aggregation of stations in queueing network models of
                 multiprogrammed computers",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "86--104",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805478",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In queueing network models the complexity of the model
                 can be reduced by aggregating stations. This amounts to
                 obtaining the throughput of the flow-equivalent station
                 for the subnetwork of stations to be aggregated. When
                 the subnetwork has a separable solution, aggregation
                 can be carried out using the Chandy--Herzog--Woo
                 theorem. The throughput of the subnetwork can be
                 expressed explicitly in terms of its parameters when
                 the stations are balanced (have equal utilizations).
                 This expression for throughput can be used as an
                 approximation when the stations are relatively
                 unbalanced. The basic expression can be modified to
                 increase the accuracy of the approximation. A
                 generating function approach was used to obtain upper
                 bounds on the relative error due to the basic
                 approximation and its modifications. Provided that the
                 relative error bound is tolerable, a set of unbalanced
                 stations can be replaced by a single aggregate station
                 or a set of balanced stations. Finally, we propose a
                 methodology to simplify the queueing network model of a
                 large-scale multiprogrammed computer, which makes use
                 of the previous aggregation results.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
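
The aggregation described above replaces a subnetwork by a flow-equivalent, load-dependent station whose service rate at population n is the subnetwork's stand-alone throughput X(n); for a balanced subnetwork of K stations with total demand D this throughput is exactly X(n) = n/(D + (n-1)D/K). The sketch below computes both and shows that they agree for a balanced subnetwork (demands assumed).

def subnetwork_throughputs(demands, N):
    """X(n) for n = 1..N of the subnetwork analyzed in isolation; these
    rates define the flow-equivalent load-dependent station."""
    Q, out = [0.0] * len(demands), []
    for n in range(1, N + 1):
        R = [d * (1 + q) for d, q in zip(demands, Q)]
        X = n / sum(R)
        Q = [X * r for r in R]
        out.append(X)
    return out

def balanced_throughput(D, K, n):
    """Explicit form for K balanced stations of demand D/K each."""
    return n / (D + (n - 1) * D / K)

print(subnetwork_throughputs([0.2, 0.2, 0.2], 5))
print([balanced_throughput(0.6, 3, n) for n in range(1, 6)])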

@Article{Schwetman:1981:CSM,
  author =       "Herb Schwetman",
  title =        "Computer system models: an introduction",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "105--105",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805479",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A system model is a tool used to predict system
                 performance under changing conditions. There are two
                 widely used modeling techniques: one based on discrete
                 event simulation and one based on queueing theory
                 models. Because queueing theory models are so much
                 cheaper to implement and use, as compared to simulation
                 models, there is growing interest in them. Users are
                 developing and using queueing theory models to project
                 system performance, project capacity, analyze
                 bottlenecks and configure systems. This talk uses an
                 operational analysis approach to develop system models.
                 This approach, as presented in Denning and Buzen [1],
                 provides an intuitive basis for analyzing system
                 performance and constructing system models. Very simple
                 calculations lead to estimates of bounds on performance
                 --- maximum job throughput rates and minimum message
                 response times. The emphasis is on gaining an
                 understanding of system models which reinforces
                 intuition, not on mathematical formulae. Several
                 examples are included. References to other works and
                 publications are provided. Application areas and
                 limitations of modeling techniques are discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
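
The very simple calculations the talk refers to apply directly to measured counts: over an observation interval of length T with C system completions and per-device busy times B_k, the utilization law gives U_k = B_k/T, throughput is X = C/T, demands are D_k = B_k/C, and the bottleneck bound is X <= 1/max D_k. A sketch with invented measurements:

# Invented measurements over a T-second observation interval:
T, C = 3600.0, 1200.0                 # interval length, job completions
B = {"cpu": 2880.0, "disk1": 1800.0, "disk2": 720.0}   # busy seconds

X = C / T                             # system throughput (jobs/second)
for k, busy in B.items():
    U = busy / T                      # utilization law: U_k = B_k / T
    D = busy / C                      # demand: D_k = B_k / C = V_k S_k
    print(k, U, D, X * D)             # note U_k == X * D_k
print("bound: X <=", 1 / max(b / C for b in B.values()))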

@Article{Denning:1981:PEE,
  author =       "Peter J. Denning",
  title =        "Performance evaluation: {Experimental} computer
                 science at its best",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "106--109",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805480",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "What is experimental computer science? This question
                 has been widely discussed ever since the Feldman Report
                 was published (1979 [18]). Many computer scientists
                 believe that survival of their discipline is intimately
                 linked to their ability to rejuvenate experimentation.
                 The National Science Foundation instituted the
                 Coordinated Experimental Research Program (CERP) in
                 1979 to help universities set up facilities capable of
                 supporting experimental research. Other agencies of
                 government are considering similar programs. Some
                 industrial firms are offering similar help through
                 modest cash grants and equipment discounts. What is
                 experimental computer science? Surprisingly, computer
                 scientists disagree on the answer. A few believe that
                 computer science is in flux --- making a transition
                 from theoretical to experimental science --- and,
                 hence, no operational definition is yet available. Some
                 believe that it is all the non-theoretical activities
                 of computer science, especially those conferring
                 ``hands-on'' experience. Quite a few believe that it is
                 large system development projects --- i.e., computer
                 and software engineering --- and they cite MIT's
                 Multics, Berkeley's version of Bell Labs' UNIX, the
                 ARPAnet, IBM's database System R, and Xerox's
                 Ethernet-based personal computer network as examples.
                 These beliefs are wrong. There are well-established
                 standards for experimental science. The field of
                 performance evaluation meets these standards and
                 provides examples of experimental science for the rest
                 of the computing field.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Rafii:1981:SAM,
  author =       "Abbas Rafii",
  title =        "Structure and application of a measurement tool ---
                 {SAMPLER\slash 3000}",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "110--120",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805481",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Design, internal structure, implementation experience
                 and a number of unique features of the SAMPLER/3000
                 performance evaluation tool are presented. This package
                 can be used to produce program CPU and wait time
                 profiles in several levels of detail in terms of code
                 segments, procedure names and procedure relative
                 addresses. It also provides an accurate profile of the
                 operating system code which is exercised to service
                 requests from selected parts of the user code.
                 Programs can be observed under natural load conditions
                 in a single user or shared environment. A program's CPU
                 usage is determined in terms of direct and indirect
                 cost components. The approaches to determine direct and
                 indirect CPU times are described. A program counter
                 sampling technique in the virtual memory domain is
                 discussed. Certain interesting aspects of data analysis
                 and on-line data presentation techniques are described.
                 The features of the computer architecture, the services
                 of the loader and compilers which relate to the
                 operation of the tool are discussed. A case study is
                 finally presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Tolopka:1981:ETM,
  author =       "Stephen Tolopka",
  title =        "An event trace monitor for the {VAX 11\slash 780}",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "121--128",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805482",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes an event trace monitor
                 implemented on Version 1.6 of the VMS operating system
                 at Purdue University. Some necessary VMS terminology is
                 covered first. The operation of the data gathering
                 mechanism is then explained, and the events currently
                 being gathered are listed. A second program, which
                 reduces the data gathered by the monitor to usable
                 form, is next examined, and some examples depicting its
                 operation are given. The paper concludes with a brief
                 discussion of some of the monitor's uses.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Artis:1981:LFD,
  author =       "H. Pat Artis",
  title =        "A log file design for analyzing secondary storage
                 occupancy",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "129--135",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805483",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A description of the design and implementation of a
                 log file for analyzing the occupancy of secondary
                 storage on IBM computer systems is discussed. Typical
                 applications of the data contained in the log are also
                 discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sanguinetti:1981:ESS,
  author =       "John Sanguinetti",
  title =        "The effects of solid state paging devices in a large
                 time-sharing system",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "136--153",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805484",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper reports the results of some measurements
                 taken on the effects two new solid state paging
                 devices, the STC 4305 and the Intel 3805, have on
                 paging performance in the Michigan Terminal System at
                 the University of Michigan. The measurements were taken
                 with a software monitor using various configurations of
                 the two solid state devices and the fixed head disk,
                 which they replace. Measurements were taken both during
                 regular production and using an artificial load created
                 to exercise the paging subsystem. The results confirmed
                 the expectation that the solid state paging devices
                 provide shorter page-in waiting times than the
                 fixed-head disk, and also pointed up some of the
                 effects which their differing architectures have on the
                 system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Wang:1981:VMB,
  author =       "Richard T. Wang and J. C. Browne",
  title =        "Virtual machine-based simulation of distributed
                 computing and network computing",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "154--156",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805485",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper proposes the use of virtual machine
                 architectures as a means of modeling and analyzing
                 networks and distributed computing systems. The
                 requirements for such modeling and analysis are
                 explored and defined along with an illustrative study
                 of X.25 link-level protocol performance under normal
                 execution conditions. The virtualizable architecture
                 used in this work is the Data General Nova 3/D.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Huslende:1981:CEP,
  author =       "Ragnar Huslende",
  title =        "A combined evaluation of performance and reliability
                 for degradable systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "157--164",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805486",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "As the field of fault-tolerant computing is maturing
                 and results from this field are taken into practical
                 use, the effects of a failure in a computer system need
                 not be catastrophic. With good fault-detection
                 mechanisms it is now possible to cover a very high
                 percentage of all the possible failures that can occur.
                 Once a fault is detected, systems are designed to
                 reconfigure and proceed either with full or degraded
                 performance depending on how much redundancy is built
                 into the system. It should be noted that one particular
                 failure may have different effects depending on the
                 circumstances and the time at which it occurs. Today we
                 see that large numbers of resources are being tied
                 together in complex computer systems, either locally or
                 in geographically distributed systems and networks. In
                 such systems it is obviously very undesirable that the
                 failure of one element can bring the entire system
                 down. On the other hand, one usually cannot afford to
                 design the system with sufficient redundancy to mask
                 the effect of all failures immediately.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Jacobson:1981:MSD,
  author =       "Patricia A. Jacobson and Edward D. Lazowska",
  title =        "The method of surrogate delays: {Simultaneous}
                 resource possession in analytic models of computer
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "165--174",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805487",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents a new approach to modelling the
                 simultaneous or overlapped possession of resources in
                 queueing networks. The key concept is that of iteration
                 between two models, each of which includes an explicit
                 representation of one of the simultaneously held
                 resources and a delay server (an infinite server, with
                 service time but no queueing) acting as a surrogate for
                 queueing delay due to congestion at the other
                 simultaneously held resource. Because of this, we refer
                 to our approximation technique as the ``method of
                 surrogate delays''.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Jacobson:1981:AAM,
  author =       "Patricia Jacobson",
  title =        "Approximate analytic models of arbiters",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "175--180",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805488",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Results at very light and very heavy loads are easy to
                 obtain, but at intermediate loads performance modelling
                 is necessary. Because of the considerable cost of
                 simulation, we develop queueing network models which
                 can be solved quickly by approximate analytic
                 techniques. These models are validated by comparing
                 with simulations at certain points, and then used to
                 get a wide range of results quickly.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Briggs:1981:PCB,
  author =       "Fay{\'e} A. Briggs and Michel Dubois",
  title =        "Performance of cache-based multiprocessors",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "181--190",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805489",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A possible design alternative to improve the
                 performance of a multiprocessor system is to insert a
                 private cache between each processor and the shared
                 memory. The caches act as high-speed buffers, reducing
                 the memory access time, and affect the delays caused by
                 memory conflicts. In this paper, we study the
                 performance of a multiprocessor system with caches. The
                 shared memory is pipelined and interleaved to improve
                 the block transfer rate, and assumes an L-M
                 organization, previously studied under random word
                 access. An approximate model is developed to estimate
                 the processor utilization and the speedup improvement
                 provided by the caches. These two parameters are
                 essential to a cost-effective design. An example of a
                 design is treated to illustrate the usefulness of this
                 investigation.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bryant:1981:QNA,
  author =       "R. M. Bryant and J. R. Agre",
  title =        "A queueing network approach to the module allocation
                 problem in distributed systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "191--204",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800189.805490",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Given a collection of distributed programs and the
                 modules they use, the module allocation problem is to
                 determine an assignment of modules to processors that
                 minimizes the total execution cost of the programs.
                 Standard approaches to this problem are based on
                 solving either a network flow problem or a constrained
                 $0$-$1$ integer programming problem. In this paper we
                 discuss an alternative approach to the module
                 allocation problem where a closed, multiclass queueing
                 network is solved to determine the cost of a particular
                 module allocation. The advantage of this approach is
                 that the execution cost can be expressed in terms of
                 performance measures of the system such as response
                 time. An interchange heuristic is proposed as a method
                 of searching for a good module allocation using this
                 model and empirical evidence for the success of the
                 heuristic is given. The heuristic normally finds module
                 allocations with costs within 10 percent of the optimal
                 module allocation. Fast, approximate queueing network
                 solution techniques based on mean-value-analysis allow
                 each heuristic search to be completed in a few seconds
                 of CPU time. The computational complexity of each
                 search is $ O(M K (K + N) C)$ where $M$ is the number
                 of modules, $K$ is the number of sites in the network,
                 $N$ is the number of communications processors, and $C$
                 is the number of distributed program types. It appears
                 that substantial problems of this type could be solved
                 using the methods we describe.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Distributed computer systems; File assignment problem;
                 Mean-value analysis; Multiclass queueing network model;
                 Task allocation problem",
}

@Article{Marathe:1981:AME,
  author =       "Madhav Marathe and Sujit Kumar",
  title =        "Analytical models for an {Ethernet}-like local area
                 network link",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "3",
  pages =        "205--215",
  month =        "Fall",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010629.805491",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:00 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Ethernet-like local area network links have been
                 studied by a number of researchers. Most of these
                 studies have involved extensive simulation models
                 operating at the level of individual packets. However,
                 as we begin building models of systems built around
                 such links, detailed simulation models are neither
                  necessary nor cost-effective. Instead, a simple
                 analytical model of the medium should be adequate as a
                 component of the higher level system models. This paper
                 discusses a number of analytical models and identifies
                 a last-in-first-out M/G/1 model with slightly increased
                 service time as one which adequately captures both the
                 mean and the coefficient of variation of the response
                 time. Given any offered load, this model can be used to
                 predict the mean waiting time and its coefficient of
                 variation. These two can be used to construct a
                  suitable 2-stage hyperexponential distribution. Random
                 numbers can then be drawn from this distribution for
                 use as waiting times of individual packets.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Pechura:1981:PLM,
  author =       "Michael A. Pechura",
  title =        "Page life measurements",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "10--12",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041865",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computer performance analysis, whether it be for
                 design, selection or improvement, has a large body of
                 literature to draw upon. It is surprising, however,
                 that few texts exist on the subject. The purpose of
                 this paper is to provide a feature analysis of the four
                 major texts suitable for professional and academic
                 purposes.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "computer performance evaluation; computer system
                 selection",
}

@Article{Clark:1981:UES,
  author =       "Jon D. Clark",
  title =        "An update on economies-of-scale in computing systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "13--14",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041866",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A workshop on the theory and application of analytical
                 models to ADP system performance prediction was held on
                 March 12-13, 1979, at the University of Maryland. The
                 final agenda of the workshop is included as an
                 appendix. Six sessions were conducted: (1) theoretical
                 advances, (2) operational analysis, (3) effectiveness
                 of analytical modeling techniques, (4) validation, (5)
                 case studies and applications, and (6) modeling tools.
                 A summary of each session is presented below. A list of
                 references is provided for more detailed information.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Janusz:1981:GMS,
  author =       "Edward R. Janusz",
  title =        "Getting the most out of a small computer",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "22--35",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041867",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The concept of a `working-set' of a program running in
                 a virtual memory environment is now so familiar that
                 many of us fail to realize just how little we really
                 know about what it is, what it means, and what can be
                 done to make such knowledge actually useful. This
                 follows, perhaps, from the abstract and apparently
                 intangible facade that tends to obscure the meaning of
                 working set. What we cannot measure often ranks high in
                 curiosity value, but ranks low in pragmatic utility.
                 Where we have measures, as in the page-seconds of
                 SMF/MVS, the situation becomes even more curious: here
                 a single number purports to tell us something about the
                 working set of a program, and maybe something about the
                 working sets of other concurrent programs, but not very
                 much about either. This paper describes a case in which
                 the concept of the elusive working set has been
                 encountered in practice, has been intensively analyzed,
                 and finally, has been confronted in its own realm. It
                 has been trapped, wrapped, and, at last, forced to
                 reveal itself for what it really is. It is not a
                 number! Yet it can be measured. And what it is,
                 together with its measures, turns out to be something
                 not only high in curiosity value, but also something
                 very useful as a means to predict the page faulting
                 behavior of a program running in a relatively complex
                 multiprogrammed environment. The information presented
                 here relates to experience gained during the conversion
                 of a discrete event simulation model to a hybrid model
                 which employs analytical techniques to forecast the
                 duration of `steady-state' intervals between mix-change
                 events in the simulation of a network-scheduled job
                 stream processing on a 370/168-3AP under MVS. The
                 specific `encounter' with the concept of working sets
                 came about when an analytical treatment of program
                 paging was incorporated into the model. As a result of
                 considerable luck, ingenuity, and brute-force
                 empiricism, the model won. Several examples of
                 empirically derived characteristic working set
                 functions, together with typical model results, are
                 supported with a discussion of relevant modeling
                 techniques and areas of application.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Cox:1981:DDD,
  author =       "Springer Cox",
  title =        "Data, definition, deduction: an empirical view of
                 operational analysis",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "36--44",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041868",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper discussed the problems encountered and
                 techniques used in conducting the performance
                 evaluation of a multi-processor on-line manpower data
                 collection system. The two main problems were: (1) a
                 total lack of available software tools, and (2) many
                 commonly used hardware monitor measures (e.g., CPU
                 busy, disk seek in progress) were either meaningless or
                 not available. The main technique used to circumvent
                 these problems was detailed analysis of one-word
                 resolution memory maps. Some additional data collection
                 techniques were (1) time-stamped channel measurements
                 used to derive some system component utilization
                 characteristics and (2) manual stopwatch timings used
                 to identify the system's terminal response times.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Muramatsu:1981:SVQ,
  author =       "Hiroshi Muramatsu and Masahiro Date and Takanori
                 Maki",
  title =        "Structural validation in queueing network models of
                 computer systems",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "41--46",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041869",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The current status of an implementation of a
                 methodology relating load, capacity and service for IBM
                 MVS computer systems is presented. This methodology
                 encompasses systems whose workloads include batch, time
                 sharing and transaction processing. The implementation
                 includes workload classification, mix representation
                 and analysis, automatic benchmarking, and exhaust point
                 forecasting.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sauer:1981:NSS,
  author =       "Charles H. Sauer",
  title =        "Numerical solution of some multiple chain queueing
                 networks",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "47--56",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041870",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper reports the results of simulation
                 experiment of a model of a virtual memory computer. The
                 model consists of three major subsystems: Program
                 Behavior, Memory Allocation and Secondary Storage. By
                 adapting existing models of these subsystems an overall
                 model for the computer operation is developed and its
                 performance is tested for various design alternatives.
                 The results are reported for different paging devices,
                  levels of multiprogramming, job mixes, memory
                  allocation schemes, page service scheduling, and page
                  replacement rates.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Nemeth:1981:AIP,
  author =       "Thomas A. Nemeth",
  title =        "An approach to interactive performance analysis in a
                 busy production system {(NOS/BE)}",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "57--73",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041808.1041815",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Many different ideas have been promulgated on
                 performance evaluation by software and hardware
                 monitoring or modelling, but most of these have
                 associated implementation problems in practice. By
                  adopting a slightly different approach (using an
                 approximation to `service wait time'), an analysis of
                 response is possible in a production system, with
                 negligible overhead. This analysis allows the actual
                 areas of contention to be identified, and some rather
                 unexpected results emerge, with a direct application to
                 scheduling policy. The work was done using the NOS/BE
                 operating system on a CDC Cyber 173 at the University
                 of Adelaide.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "measurement; multiprogramming; performance evaluation;
                 production; response; scheduling; timesharing",
}

@Article{Knudson:1981:CPE,
  author =       "Michael E. Knudson",
  title =        "A computer performance evaluation operational
                 methodology",
  journal =      j-SIGMETRICS,
  volume =       "10",
  number =       "4",
  pages =        "74--80",
  month =        dec,
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041808.1041816",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:57:58 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A method suggesting how to organize and operate a
                  Computer Performance Evaluation (CPE) project is
                 presented. It should be noted that the suggested
                 principles could apply to a modeling or simulation
                 effort.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Afshari:1981:MNT,
  author =       "P. V. Afshari and S. C. Bruell and R. Y. Kain",
  title =        "Modeling a new technique for accessing shared buses",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "4--13",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801685",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Consider a queueing system in which customers (or
                 jobs) arrive to one of $Q$ separate queues to await
                 service from one of $S$ identical servers (Figure 1).
                 Once a job enters a queue it does not leave that queue
                 until it has been selected for service. Any server can
                 serve any job from any queue. A job selected for
                 service cannot be preempted. In this paper we consider
                 jobs to be in a single class; for the multiple class
                 result see [AFSH81a]. We assume once a queue has been
                 selected, job scheduling from that queue is fair. In
                 particular, our results hold for first come first serve
                 as well as random selection [SPIR79] and, for that
                 matter, any fair nonpreemptive scheduling policy within
                 a queue. We assume that arrivals to each queue follow a
                 Poisson process with the mean arrival rate to queue $q$
                 being $ \lambda q$. The $S$ identical exponential
                 servers are each processing work at a mean rate of $
                 \mu $. This system is general enough to be adaptable
                 for modeling many different applications. By choosing
                 the policy employed for queue selection by the servers,
                 we can model multiplexers, channels, remote job entry
                 stations, certain types of communication processors
                 embedded in communication networks, and sets of shared
                 buses. In this paper we will use the latter application
                 to discuss a realistic situation. The elements
                 (``jobs'') in the queues are messages to be sent from
                 modules connected to the shared bus of the system. The
                 servers are the buses; their service times are equal to
                 the message transmission times. The queues are in the
                 interface modules connected to and sharing the buses.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lazar:1981:OCM,
  author =       "Aurel A. Lazar",
  title =        "Optimal control of a {M\slash M\slash m} queue",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "14--20",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801686",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The problem of optimal control of a M/M/m queueing
                 system is investigated. As in the M/M/l case the
                 optimum control is shown to be a window type mechanism.
                 The window size $L$ depends on the maximum allowable
                 time delay $T$ and can be explicitly computed. The
                 throughput time delay function of the M/M/m system is
                 briefly discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Spirn:1981:NMB,
  author =       "Jeffrey R. Spirn",
  title =        "Network modeling with bursty traffic and finite buffer
                 space",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "21--28",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801687",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper, we propose a class of queueing network
                 models, and a method for their approximate solution,
                 for computer networks with bursty traffic and finite
                 buffer space. The model is open, implying no population
                 limit except for buffer size limits and therefore no
                 window-type flow control mechanism. Each node of the
                 computer network is represented as a finite-length
                 queue with exponential service and an arrival process
                 which is initially bulk Poisson, but becomes less and
                 less clustered from hop to hop. Elaborations are
                 possible to account for varying mean packet sizes and
                 certain buffer pooling schemes, although these involve
                 further approximation. The approximations of the method
                 were validated against several simulations, with
                 reasonable agreement, and certainly with much less
                 error than is obtained by modeling a bursty traffic
                 source as Poisson.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lam:1981:ORN,
  author =       "Simon S. Lam and Y. Luke Lien",
  title =        "Optimal routing in networks with flow-controlled
                 virtual channels",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "38--46",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801688",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Packet switching networks with flow-controlled virtual
                 channels are naturally modeled as queueing networks
                 with closed chains. Available network design and
                 analysis techniques, however, are mostly based upon an
                 open-chain queueing network model. In this paper, we
                 first examine the traffic conditions under which an
                 open-chain model accurately predicts the mean
                 end-to-end delays of a closed-chain model having the
                 same chain throughputs. We next consider the problem of
                 optimally routing a small amount of incremental traffic
                 corresponding to the addition of a new virtual channel
                 (with a window size of one) to a network. We model the
                 new virtual channel as a closed chain. Existing flows
                 in the network are modeled as open chains. An optimal
                 routing algorithm is then presented. The algorithm
                 solves a constrained optimization problem that is a
                 compromise between problems of unconstrained
                 individual-optimization and unconstrained
                 network-optimization.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Livny:1981:LBH,
  author =       "Miron Livny and Myron Melman",
  title =        "Load balancing in homogeneous broadcast distributed
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "47--55",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801689",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Three different load balancing algorithms for
                 distributed systems that consist of a number of
                 identical processors and a CSMA communication system
                 are presented in this paper. Some of the properties of
                 a multi-resource system and the balancing process are
                 demonstrated by an analytic model. Simulation is used
                  as a means of studying the interdependency between
                 parameters of the distributed system and the behaviour
                 of the balancing algorithm. The results of this study
                 shed light on the characteristics of the load balancing
                 process.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Wecker:1981:PGD,
  author =       "Stuart Wecker and Robert Gordon and James Gray and
                 James Herman and Raj Kanodia and Dan Seligman",
  title =        "Performance of globally distributed networks",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "58--58",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801690",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In the design and implementation of computer networks
                 one must be concerned with their overall performance
                 and the efficiency of the communication mechanisms
                 chosen. Performance is a major issue in the
                 architecture, implementation, and installation of a
                 computer communication network. The architectural
                 design always involves many cost/performance tradeoffs.
                 Once implemented, one must verify the performance of
                 the network and locate bottlenecks in the structure.
                 Configuration and installation of a network involves
                 the selection of a topology and communication
                 components, channels and nodes of appropriate capacity,
                 satisfying performance requirements. This panel will
                 focus on performance issues involved in the efficient
                 design, implementation, and installation of globally
                 distributed computer communication networks.
                 Discussions will include cost/performance tradeoffs of
                 alternative network architecture structures, methods
                 used to measure and isolate implementation performance
                 problems, and configuration tools to select network
                 components of proper capacity. The panel members have
                 all been involved in one or more performance issues
                 related to the architecture, implementation, and/or
                 configuration of the major networks they represent.
                 They will describe their experiences relating to
                 performance issues in these areas. Methodologies and
                 examples will be chosen from these networks in current
                 use. There will be time at the end of the session for
                 questions to the panel.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gordon:1981:OMH,
  author =       "R. L. Gordon",
  title =        "Operational measurements on a high performance ring",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "59--59",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801691",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Application and system software architecture can
                 greatly influence the operational statistics of a local
                 network. The implementation of a transparent file
                 system on top of a high bandwidth local network has
                  resulted in a high volume of file traffic
                 over the local network whose characteristics are
                 largely fixed and repeatable. These statistics will be
                 presented along with arguments for and against
                 designing mechanisms that optimize specifically for
                 that class of traffic.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Local networks; Performance; Remote files",
}

@Article{Gray:1981:PSL,
  author =       "James P. Gray",
  title =        "Performance of {SNA}'s {LU-LU} session protocols",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "60--61",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801692",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "SNA is both an architecture and a set of products
                 built in conformance with the architecture (1,2,3). The
                 architecture is layered and precisely defined; it is
                 both evolutionary and cost effective for implementing
                 products. Perhaps the largest component of cost
                 effectiveness is performance: transaction throughput
                 and response times. For SNA, this involves data link
                 control protocols (for SDLC and S/370 channel DLC's),
                 routing algorithms, protocols used on the sessions that
                 connect logical units (LU-LU session protocols), and
                 interactions among them. SNA's DLC and routing
                 protocols have been discussed elsewhere (4,5,6); this
                 talk examines protocols on sessions between logical
                 units (LU-LU session protocols) and illustrates the
                 results of design choices by comparing the performance
                 of various configurations.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Herman:1981:APT,
  author =       "James G. Herman",
  title =        "{ARPANET} performance tuning techniques",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "62--62",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801693",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "As part of its operation and maintenance of the
                 ARPANET for the past twelve years, BBN has been asked
                 to investigate a number of cases of degradation in
                 network performance. This presentation discusses the
                 practical methods and tools used to uncover and correct
                 the causes of these service problems. A basic iterative
                 method of hypothesis generation, experimental data
                 gathering, and analysis is described. Emphasis is
                 placed on the need for experienced network analysts to
                 direct the performance investigation and for the
                 availability of network programmers to provide special
                 purpose modifications to the network node software in
                 order to probe the causes of the traffic patterns under
                 observation. Many typical sources of performance
                 problems are described, a detailed list of the tools
                  used by the analyst is given, and a list of basic
                  techniques is provided. Throughout the presentation,
                 specific examples from actual ARPANET performance
                 studies are used to illustrate the points made.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Aleh:1981:DUB,
  author =       "Avner Aleh and K. Dan Levin",
  title =        "The determination of upper bounds for economically
                 effective compression in packet switching networks",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "64--72",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801694",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper deals with the economic tradeoffs
                 associated with data compression in a packet switching
                 environment. In section II we present the data profile
                 concept and the compression analysis of typical
                 file-transfer data strings. This is followed by a
                 compression cost saving model that is developed in
                 section III. Upper bounds for an economically effective
                 compression service are derived there, and the paper
                 concludes with an example of these bounds based on
                 state of the art technology.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{McGregor:1981:CMP,
  author =       "Patrick V. McGregor",
  title =        "Concentrator modeling with pipelining arrivals
                 compensation",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "73--94",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801695",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A general model of Intelligent Communications
                 Concentrating Devices (ICCD) is presented and analyzed
                 for delay and overflow performance with compensation
                 for the pipelining effect of message arrivals extending
                 over time. The results of the analysis indicate that,
                 for the same trunk utilization, the trend towards
                 buffered terminals with longer messages requires
                 substantially greater buffering in the ICCD. The
                 nominal environment analyzed consisted of 10--40 medium
                 speed terminals (1200 b/s--9600 b/s) operating over a
                 medium speed trunk (9600 b/s) with trunk utilizations
                 from 20 percent to 80 percent and average message
                 lengths up to 1000 characters. This is a substantially
                 different environment than that typically served by
                 current implementations of ICCDs, which are frequently
                  reported to have throughput improvements of 2--3 times
                 the nominal originating terminal bandwidths, as opposed
                 to the typical factor of 5 for the analyzed
                 environment. This does not reflect on the
                 appropriateness of the ICCDs in serving the new
                  environment, but simply indicates that in the
                 new environment the same character volume of traffic
                 may be appearing with different traffic characteristics
                 over higher speed access lines. If the new environment
                 shows only a difference in traffic characteristics and
                 originating line speed, without change in the traffic
                 control scheme (or lack of scheme), the results
                 indicate essentially reproduction of a large part of
                 the terminal buffering in the ICCD for adequate
                 overflow performance. Alternatively, with smarter
                 terminals, traffic control schemes (flow control) may
                 enable the ICCD to be reduced to an essentially
                 unbuffered ``traffic cop,'' with the terminal buffering
                 also serving as the shared facility buffering. Several
                 practical implementations of ICCDs have provision for
                 flow control, but require cooperating terminals and
                 hosts. This suggests that ICCD design and application
                 will become more sensitive to the practical operating
                 features of the target environment than has been
                 generally the case to date. The analysis presented in
                 this paper involves many simplifications to the actual
                 problem. Additional work to accommodate non-exponential
                 message length distributions and heterogeneous terminal
                 configurations are perhaps two of the more immediate
                 problems that may be effectively dealt with.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Mink:1981:MEC,
  author =       "Alan Mink and Charles B. {Silio, Jr.}",
  title =        "Modular expansion in a class of homogeneous networks",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "95--100",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801696",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider a special class of homogeneous computer
                  networks comprising several essentially identical but
                 independent computing systems (ICSs) sharing a single
                 resource. Of interest here are the effects of modularly
                 expanding the network by adding ICSs. We use a
                 previously presented approximate queueing network model
                 to analyze modular expansion in this class of network.
                 The performance measure used in this analysis is the
                 mean cycle time, which is the mean time between
                 successive requests for service by the same job at the
                 CPU of an ICS. In this analysis we derive an
                 intuitively satisfying mathematical relation between
                 the addition of ICSs and the incremental increase in
                 the service rate of the shared resource required to
                 maintain the existing level of system performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Thareja:1981:UBA,
  author =       "Ashok K. Thareja and Satish K. Tripathi and Richard A.
                 Upton",
  title =        "On updating buffer allocation",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "101--110",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801697",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Most of the analysis of buffer sharing schemes has
                 been aimed at obtaining the optimal operational
                 parameters under stationary load situations. It is well
                 known that in most operating environments the traffic
                 load changes. In this paper, we address the problem of
                 updating buffer allocation as the traffic load at a
                 network node changes. We investigate the behavior of a
                 complete partitioning buffer sharing scheme to gain
                 insight into the dependency of the throughput upon
                 system parameters. The summary of the analysis is
                 presented in the form of a heuristic. The heuristic is
                 shown to perform reasonably well under two different
                 types of stress tests.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Elsanadidi:1981:ATW,
  author =       "M. Y. Elsanadidi and Wesley W. Chu",
  title =        "An analysis of a time window multiaccess protocol with
                 collision size feedback {(WCSF)}",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "112--118",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801698",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We analyze the performance of a window multiaccess
                 protocol with collision size feedback. We obtain bounds
                 on the throughput and the expected packet delay, and
                 assess the sensitivity of the performance to collision
                 recognition time and packet transmission time. An
                 approximate optimal window reduction factor to minimize
                 packet isolation time is {equation}, where $n$ is the
                 collision size and $R$ the collision recognition time
                 (in units of packet propagation delay). The WCSF
                 protocol, which requires more information than CSMA-CD,
                 is shown to have at least 30\% more capacity than
                 CSMA-CD for high bandwidth channels; that is, when
                 packet transmission time is comparable to propagation
                 delay. The capacity gain of the WCSF protocol decreases
                 as the propagation delay decreases and the collision
                 recognition time increases. Our study also reveals the
                  inherent stability of WCSF: when the input load
                  increases beyond saturation, the throughput remains at
                  its maximum value.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Roehr:1981:PALa,
  author =       "Kuno M. Roehr and Horst Sadlowski",
  title =        "Performance analysis of local communication loops",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "119--129",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801699",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The communication loops analyzed here provide an
                  economical way of attaching many different terminals
                 which may be some kilometers away from a host
                 processor. Main potential bottlenecks were found to be
                 the loop transmission speed, the loop adapter
                 processing rate, and the buffering capability, all of
                 which are analyzed in detail. The buffer overrun
                 probabilities are found by convolving individual buffer
                 usage densities and by summing over the tail-end of the
                 obtained overall density function. Examples of analysis
                 results are given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sherman:1981:DVH,
  author =       "R. H. Sherman and M. G. Gable and A. W. Chung",
  title =        "Distributed virtual hosts and networks: {Measurement}
                 and control",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "130--136",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801700",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Diverse network application requirements bring about
                  local networks of various sizes, degrees of complexity,
                  and architectures. The purpose of this paper is to
                 present a network protocol layer which is used to
                 provide a homogeneous operating environment and to
                 ensure the availability of network resources. The
                 network layer process probes the underlying local
                 network to discover its properties and then adapts to
                  changing network conditions. The principal contribution
                 of this paper is to generalize properties of diverse
                 local networks which can be measured. This is important
                 when considering maintenance and service of various
                  communication links. The three types of links are
                  point-to-point links; multi-drop, loop, or switched
                  links; and multi-access contention data buses. A
                 prototype network is used to show a complexity
                 improvement in the number of measurement probes
                 required using a multi-access contention bus. Examples
                 of measurement techniques and network adaptation are
                 presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Brice:1981:NPA,
  author =       "Richard Brice and William Alexander",
  title =        "A network performance analyst's workbench",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "138--146",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801701",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Performance measurement and analysis of the behavior
                 of a computer network usually requires the application
                 of multiple software and hardware tools. The location,
                 functionality, data requirements, and other properties
                 of the tools often reflect the distribution of
                 equipment in the network. We describe how we have
                 attempted to organize a collection of tools into a
                 single system that spans a broad subset of the
                 measurement and analysis activities that occur in a
                 complex network of heterogeneous computers. The tools
                 are implemented on a pair of dedicated midicomputers. A
                 database management system is used to couple the data
                 collection and analysis tools into a system highly
                 insulated from evolutionary changes in the composition
                 and topology of the network.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{DuBois:1981:HMS,
  author =       "Donald F. DuBois",
  title =        "A {Hierarchical Modeling System} for computer
                 networks",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "147--155",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801702",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes the Hierarchical Modeling System
                 (HMS). HMS is a tool --- a unified and expandable
                 system --- which supports the development of analytic
                 and simulator models of computer networks. The same
                 system and workload descriptions can be interpreted as
                 analytic queueing models with optimization techniques
                 or as discrete event simulation models. The rationale
                 behind the development of HMS is that high level
                 analyses incorporating analytic techniques may be used
                 in the early design phase for networks, when many
                 options are considered, while detailed simulation
                 studies of fewer design alternatives are appropriate
                 during the later stages.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Analytic models; Computer networks; Hierarchical
                 models; Performance evaluation; Simulation",
}

@Article{Terplan:1981:NPR,
  author =       "K. Terplan",
  title =        "Network performance reporting",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "156--170",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801703",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Managing networks using Network Administration Centers
                 is increasingly considered. After introducing the
                 information demand for operational, tactical and
                 strategic network management the paper is dealing with
                 the investigation of the applicability of tools and
                 techniques for these areas. Network monitors and
                 software problem determination tools are investigated
                 in greater detail. Also implementation details for a
                 multihost-multinode network including software and
                 hardware tools combined by SAS are discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Spiegel:1981:QLA,
  author =       "Mitchell G. Spiegel",
  title =        "Questions for {Local Area Network} panelists",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "172--172",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801704",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Much has been written and spoken about the
                 capabilities of emerging designs for Local Area
                 Networks (LANs). The objective of this panel session
                 was to gather companies and agencies that have
                 brought LANs into operation. Questions about the
                 performance of LANs have piqued the curiosity of the
                 computer/communications community. Each member of the
                 panel briefly described his or her LAN installation and
                 workload as a means of introduction to the audience.
                 Questions about performance were arranged into a
                 sequence by performance attributes. Those attributes
                 thought to be of greatest importance were discussed
                 first. Discussion on the remainder of the attributes
                 continued as time and audience interaction permitted.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Roehr:1981:PALb,
  author =       "Kuno M. Roehr and Horst Sadlowski",
  title =        "Performance analysis of local communication loops",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "173--173",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801705",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The communication loops analyzed here provide an
                 economical way of attaching many different terminals to
                 an IBM 4331 host processor, which may be several
                 kilometers away. As a first step of the investigation,
                 protocol overhead is derived. It consists of request
                 and transmission headers and the associated
                 acknowledgements as defined by the Systems Network
                 Architecture. Additional overhead is due to the
                 physical layer protocols of the Synchronous Data Link
                 Control including lower level confirmation frames. The
                 next step is to describe the performance
                 characteristics of the loop attachment hardware,
                 primarily consisting of the external loop station
                 adapters for local and teleprocessing connections and
                 the loop adapter processor.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sternick:1981:SAD,
  author =       "Barbara R. Sternick",
  title =        "Systems aids in determining {Local Area Network}
                 performance characteristics",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "174--174",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801706",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "At Bethesda, Maryland, the National Library of
                 Medicine has a large array of heterogeneous data
                 processing equipment dispersed over ten floors in the
                 Lister Hill Center and four floors in the Library
                 Building. The National Library of Medicine decided to
                 implement a more flexible, expansible access medium
                 (Local Area Network (LAN)) to handle the rapid growth
                 in the number of local and remote users and the
                 changing requirements. This is a dual coaxial cable
                 communications system designed using cable television
                 (CATV) technology. One cable, the outbound cable,
                 transfers information between the headend and the user
                 locations. The other cable, the inbound cable,
                 transfers information from the user locations to the
                 headend. This system will permit the distribution of
                 visual and digital information on a single medium.
                 On-line devices, computers, and a technical control
                 system network control center are attached to the LAN
                 through BUS Interface Units (BIUs). The technical
                 control system will collect statistical and status
                 information concerning the traffic, BIUs, and system
                 components. The BIUs will, at fixed intervals, transmit
                 status information to the technical control. The
                 Network Control Centers (NCC) will provide network
                 directory information for users of the system,
                 descriptions of the services available, etc. An X.25
                 gateway BIU will interface the LAN to the public
                 networks (Telenet and Tymnet) and to X.25 host computer
                 systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Anonymous:1981:AI,
  author =       "Anonymous",
  title =        "Authors Index",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "1",
  pages =        "175--175",
  month =        "Spring",
  year =         "1981",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800047.801707",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:02 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Rajaraman:1982:PET,
  author =       "M. K. Rajaraman",
  title =        "Performance evaluation through job scheduler
                 modeling",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "9--15",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010673.800501",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The scheduler in the Cyber-176 computer does the major
                 functions of routing the job through the system,
                 controlling job's progress through aging and swapping
                 of jobs between various queues and resource allocation
                 among jobs. This paper reports some results of the
                 performance evaluation study of the Cyber-176 by
                 modeling the scheduler as the heart of the system. The
                 study explores the effects of varying the scheduler
                 parameters in the performance of the machine in a
                 particular installation. The basic theme of the paper
                 is that the selection of parameters in a laboratory or
                 a system test environment may not always result in the
                 best performance in an actual installation. The
                 simulation provides vital information for installation
                 management and tuning the operating system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Mager:1982:TPA,
  author =       "Peter S. Mager",
  title =        "Toward a parametric approach for modeling local area
                 network performance",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "17--28",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010673.800502",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The task of modeling the performance of a single
                 computer (host) with associated peripheral devices is
                 now well understood [Computer 80]. In fact, highly
                 usable tools based on analytical modeling techniques
                 are commercially available and in widespread use
                 throughout the industry. [Buzen 78] [Buzen 81] [Won 81]
                 These tools provide a mechanism for describing
                 computerized environments and the workloads to be
                 placed on them in a highly parameterized manner. This
                 is important because it allows users to describe their
                 computer environments in a structured way that avoids
                 unnecessary complexity. It also is helpful in
                 facilitating intuitive interpretations of modeling
                 results and applying them to capacity planning
                 decisions. A first step toward building a modeling tool
                 and associated network specification language that
                 allows straightforward, inexpensive, and interpretable
                 modeling of multi-computer network performance is to
                 identify the set of characteristics (parameters) that
                 most heavily influence that performance. The result of
                 such a study for the communication aspects of local
                 area networks is the subject of this paper.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gaffney:1982:SSI,
  author =       "John E. {Gaffney, Jr.}",
  title =        "Score `82 --- a summary (at {IBM Systems Research
                 Institute}, 3\slash 23-3\slash 24\slash 82)",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "30--32",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010673.800503",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "``Score `82'', the first workshop on software counting
                 rules, was attended by practitioners who are working
                 with ``software metrics''. The concern was with
                 methodologies for counting such software measurables as
                 the number of ``operators'', ``operands'' or the number
                 of lines of code in a program. A ``metric'' can be a
                 directly countable ``measurable'' or a quantity
                 computable from one or several such ``measurables''.
                 ``Metrics'' quantify attributes of the software
                 development process, the software itself, or some
                 aspect of the interaction of the software with the
                 processor that hosts it. In general, a ``metric''
                 should be useful in the development of software and in
                 measuring its quality. It should have some theory to
                 support its existence, and it should be based on actual
                 software data. This workshop was concerned principally
                 with the data aspects of ``metrics'', especially with
                 the rules underlying the collection of the data from
                 which they are computed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Misek-Falkoff:1982:NFS,
  author =       "Linda D. Misek-Falkoff",
  title =        "The new field of {``Software Linguistics''}: an
                 early-bird view",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "35--51",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800002.800504",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The phrase ``Software Linguistics'' is applied here to
                 a text-based perspective on software quality matters.
                 There is much in the new work on Software Metrics
                 generally, and Software Science in particular, that is
                 reminiscent of the activities of Natural Language
                 analysis. Maurice Halstead held that Software Science
                 could shed light on Linguistics; this paper sketches
                 some mutually informing reciprocities between the two
                 fields, and across related areas of textual, literary,
                 discourse, and communications analysis.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Ease of use; Linguistics; Metrics; Natural language
                 analysis; Quality; Software science; Text complexity",
}

@Article{Spiegel:1982:SCR,
  author =       "Mitchell G. Spiegel",
  title =        "Software counting rules: {Will} history repeat
                 itself?",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "52--56",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800002.800505",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Counting rules in the software metrics field have been
                 developed for counting such software measurables as the
                 occurrence of operators, operands and the number of
                 lines of code. A variety of software metrics, such as
                 those developed by Halstead and others, are computed
                 from these numbers. Published material in the software
                 metrics field has concentrated on relationships between
                 various metrics, comparisons of values obtained for
                 different languages, etc. Yet, little, if anything has
                 been published on assumptions, experimental designs, or
                 the nature of the counting tools (or programs)
                 themselves used to obtain the basic measurements from
                 which these metrics are calculated.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kavi:1982:EDS,
  author =       "Krishna M. Kavi and U. B. Jackson",
  title =        "Effect of declarations on software metrics: an
                 experiment in software science",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "57--71",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800002.800506",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The attractiveness of software science [HAL77] is to
                 some extent due to the simplicity of its
                 instrumentation. Once the detailed rules for counting
                 operators and operands have been learned, the
                 experiments and derivations using various algorithms
                 and languages can
                 be repeated. Proposed or actual applications of
                 software science are quite varied (For example, see
                 [SEN79]). The size and construction time of a program
                 can be estimated from the problem specification and the
                 choice of programming language. An estimate of the
                 number of program bugs can be shown to depend on
                 programming effort. Optimal choice of module sizes for
                 multimodule implementations can be computed. Elements
                 of software science have applications to the analysis
                 of technical prose. The purpose of this experiment is
                 threefold. First, we want to apply software science
                 metrics to the language `C'. The second purpose of the
                 experiment is to study the effect of including
                 declaration statements while counting operators and
                 operands. Finally, we have set out to determine whether
                 the area of application has any influence on software
                 science metrics.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
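
%%% The software-science measures discussed in the surrounding
%%% entries derive from four basic counts: n1 distinct operators, n2
%%% distinct operands, N1 total operator occurrences, and N2 total
%%% operand occurrences.  A minimal Python sketch of the standard
%%% Halstead quantities follows; the function name and calling
%%% convention are illustrative, not taken from any of these papers.
%%%
%%%     import math
%%%
%%%     def halstead(n1, n2, N1, N2):
%%%         """Standard Halstead measures from the four basic counts
%%%         (all counts assumed positive)."""
%%%         n = n1 + n2                  # vocabulary
%%%         N = N1 + N2                  # observed program length
%%%         N_hat = n1 * math.log2(n1) + n2 * math.log2(n2)  # length estimator
%%%         V = N * math.log2(n)         # volume
%%%         D = (n1 / 2.0) * (N2 / n2)   # difficulty
%%%         E = D * V                    # effort
%%%         return {"vocabulary": n, "length": N,
%%%                 "estimated_length": N_hat, "volume": V,
%%%                 "difficulty": D, "effort": E}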

@Article{Gaffney:1982:MIC,
  author =       "John E. {Gaffney, Jr.}",
  title =        "{Machine Instruction Count Program}",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "72--79",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800002.800507",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The Machine Instruction Count Program (MIC Program)
                 was originally developed in 1978 to produce `operator'
                 and `operand' counts of object programs written for the
                 AN/UYK-7 military computer. In 1981, its capability was
                 expanded so that it could apply to the AN/UYS-1 (or
                 ``Advanced Signal Processor'') military computer. The
                 former machine, made by UNIVAC, hosts the IBM-developed
                 software for the sonar and defensive weapons
                 system/command system for the TRIDENT missile launching
                 submarine and the software for the sonar for the new
                 Los Angeles-class attack submarines. The second
                 machine, made by IBM, is incorporated into several
                 military systems including the LAMPS anti-submarine
                 warfare system. The MIC program has been applied to
                 collect a large amount of data about programs written
                 for the AN/UYK-7 and AN/UYS-1 computers. From these
                 data, several of the well-known software `metrics' (1),
                 such as `volume', `language level', and `difficulty',
                 have been calculated. Some of the results obtained have
                 been reported in the literature (3,4). Probably the
                 most significant practical use of these data, so far,
                 has been the development of formulas for estimating
                 the amount of code to be written (2,5) as a function
                 of measures of the requirements or of the (top-level)
                 design that the code is to implement.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Misek-Falkoff:1982:UHS,
  author =       "Linda D. Misek-Falkoff",
  title =        "A unification of {Halstead}'s {Software Science}
                 counting rules for programs and {English} text, and a
                 claim space approach to extensions",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "80--114",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800002.800508",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In his Elements of Software Science, Maurice Halstead
                 proposed that software quality measurements could be
                 based on static lexemic analysis of the vocabularies of
                 operators and operands, and the number of occurrences
                 of each class, in computer programs. He also proposed
                 that quality issues in Natural Language text could be
                 addressed from similar perspectives, although his rules
                 for programs and for English seem to conflict. This
                 paper suggests that Halstead's seemingly disparate
                 rules for classifying the tokens of programs and the
                 tokens of English can be generally reconciled, although
                 Halstead himself does not claim such a union. The
                 thesis of Part One is a unification of his two
                 procedures, based on a linguistic partitioning between
                 ``open'' and ``closed'' classes. This unification may
                 provide new inputs to some open issues concerning
                 coding, and suggest, on the basis of a conceptual
                 rationale, an explanation as to why programs which are
                 by Halstead's definition ``impure'' might indeed be
                 confusing to the human reader. Part Two of this paper,
                 by exploring the nodes in a textual ``Claim Space,''
                 briefly considers other groupings of the classes taken
                 as primitive by Halstead, in ways which bring to light
                 alternate and supplementary sets of candidate coding
                 rules productive for study of textual quality.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Linguistics; Metrics; Natural language analysis;
                 Quality; Software science; Text complexity",
}

@Article{Estes:1982:DPO,
  author =       "George E. Estes",
  title =        "Distinguishing the potential operands in {FORTRAN}
                 programs",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "115--117",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800002.800509",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "There are several possible relationships between the
                 number of potential operands and the actual operands
                 used which correlate with available data (such as
                 Akiyama's debugging data). However, additional data is
                 required to distinguish between these hypotheses. Since
                 there is a large body of programs available written in
                 FORTRAN, we wish to develop a mechanical counting
                 procedure to enumerate potential operands in FORTRAN
                 programs. We are currently developing counting rules
                 for these potential operands. Sub-routine parameters
                 and input/output variables are relatively easy to
                 identify. However, a number of FORTRAN features, such
                 as COMMON blocks and EQUIVALENCE'd variables, introduce
                 serious complications. Additional usage analysis or
                 heuristic approaches are required to differentiate
                 potential operands in these situations.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Conte:1982:EDC,
  author =       "S. D. Conte and V. Y. Shen and K. Dickey",
  title =        "On the effect of different counting rules for control
                 flow operators on {Software Science} metrics in
                 {Fortran}",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "2",
  pages =        "118--126",
  month =        "Summer",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010673.800510",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:58:56 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Halstead in his Theory of Software Science, proposed
                 that in the Fortran language, each occurrence of a {\tt
                 GOTO i} for different label {\tt i}'s be counted as a
                 unique operator. Several writers have questioned the
                 wisdom of this method of counting GOTO's. In this
                 paper, we investigate the effect of counting GOTO's as
                 several occurrences of a single unique operator on
                 various software science metrics. Some 412 modules from
                 the International Mathematical and Statistical
                 Libraries (IMSL) are used as the data base for this
                 study.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
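
%%% The counting question studied above can be made concrete.  Under
%%% Halstead's rule, each {\tt GOTO i} with a distinct label is a
%%% distinct operator; the alternative treats every GOTO as an
%%% occurrence of a single operator.  A toy Python comparison (the
%%% token list is invented, not the IMSL data used in the paper):
%%%
%%%     gotos = ["GOTO 10", "GOTO 20", "GOTO 10", "GOTO 30"]
%%%
%%%     # Halstead's rule: one operator per distinct label.
%%%     n1_per_label = len(set(gotos))   # 3 distinct operators
%%%     N1_per_label = len(gotos)        # 4 occurrences
%%%
%%%     # Alternative rule: all GOTOs are one operator.
%%%     n1_single = 1
%%%     N1_single = len(gotos)           # still 4 occurrences
%%%
%%% Only the distinct-operator count n1 changes, but that count feeds
%%% the vocabulary-based metrics (volume, difficulty, effort), which
%%% is why the choice of rule matters.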

@Article{Shanthikumar:1982:PCF,
  author =       "J. G. Shanthikumar and P. K. Varshney and K. Sriram",
  title =        "A priority cutoff flow control scheme for integrated
                 voice-data multiplexers",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "8--14",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807790",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper, we consider the flow control problem
                 for a movable boundary integrated voice-data
                 multiplexer. We propose a flow control scheme where a
                 decision rule based on the data queue length is
                 employed to cut off the priority of voice to prevent a
                 data queue buildup. A continuous-time queueing model
                 for the integrated multiplexer is developed. The
                 performance of the flow control scheme is obtained
                 using an efficient computational procedure. A numerical
                 example is presented for illustration.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
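
%%% The cutoff rule described above can be pictured frame by frame:
%%% voice keeps its priority for the movable-boundary capacity only
%%% while the data queue is below a threshold.  A discrete-time
%%% Python sketch under invented parameters (the paper's model is
%%% continuous-time, and its exact decision rule is not reproduced
%%% here):
%%%
%%%     def serve_frame(voice_q, data_q, slots, cutoff):
%%%         """One multiplexer frame under a priority-cutoff rule:
%%%         voice is served first unless the data queue has reached
%%%         the cutoff, in which case data is served first."""
%%%         if data_q >= cutoff:              # cut off voice priority
%%%             d = min(data_q, slots)
%%%             v = min(voice_q, slots - d)
%%%         else:                             # normal operation
%%%             v = min(voice_q, slots)
%%%             d = min(data_q, slots - v)
%%%         return voice_q - v, data_q - d    # remaining queues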

@Article{Cox:1982:DDD,
  author =       "Springer Cox",
  title =        "Data, definition, deduction: an empirical view of
                 operational analysis",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "15--20",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807791",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The theoretical aspects of operational analysis have
                 been considered more extensively than matters of its
                 application in practical situations. Since its
                 relationships differ in their applicability, they must
                 be considered separately when they are applied. In
                 order to do this, the foundations of three such
                 relationships are examined from an empirical point of
                 view. To further demonstrate the intimate connection
                 between data, definitions, and performance models, the
                 problem of measurement artifact is considered.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Perros:1982:QLD,
  author =       "H. G. Perros",
  title =        "The queue-length distribution of the {M\slash Ck\slash
                 1} queue",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "21--24",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807792",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The exact closed-form analytic expression of the
                 probability distribution of the number of units in a
                 single server queue with Poisson arrivals and Coxian
                 service time distribution is obtained.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
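
%%% For orientation, the one-phase special case of this result (a
%%% Coxian service time with a single exponential phase, i.e., the
%%% M/M/1 queue) is the familiar geometric distribution; with Poisson
%%% arrival rate $\lambda$ and service rate $\mu$,
%%%
%%%     p_k = (1 - \rho)\,\rho^k, \qquad \rho = \lambda/\mu < 1,
%%%           \qquad k = 0, 1, 2, \ldots
%%%
%%% The general M/Ck/1 expression derived in the paper reduces to
%%% this when the Coxian has a single phase.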

@Article{Anderson:1982:BMP,
  author =       "Gordon E. Anderson",
  title =        "{Bernoulli} methods for predicting communication
                 processor performance",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "25--29",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800201.807793",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents a method for applying Bernoulli
                 trials to predict the number of communication lines a
                 communication processor can process without losing data
                 due to character overrun conditions. First, a simple
                 method for determining the number of lines which a
                 communication processor can support without possibility
                 of character overrun will be illustrated. Then, it will
                 be shown that communication processors can tolerate
                 occasional character overrun. Finally, using Bernoulli
                 trials, the probability of character overrun and the
                 mean time between character overruns will be calculated.
                 These last two figures are useful to system designers
                 in determining the number of lines which a
                 communication processor can reasonably support.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Bernoulli trials; Character overrun; Communication
                 processor; Markov process; Protocol; Thrashing",
}
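
%%% The Bernoulli-trial calculation can be sketched directly.  If,
%%% during one scan cycle, each of n lines independently delivers a
%%% character with probability q, and the processor can clear at most
%%% m characters per cycle, the per-cycle overrun probability is a
%%% binomial tail, and the mean time between overruns is the cycle
%%% time divided by that probability.  The parameters below are
%%% invented for illustration:
%%%
%%%     from math import comb
%%%
%%%     def overrun_stats(n, q, m, cycle_s):
%%%         """P(X > m) with X ~ Binomial(n, q), plus the mean time
%%%         between overruns for the given cycle time."""
%%%         p_over = sum(comb(n, k) * q**k * (1 - q)**(n - k)
%%%                      for k in range(m + 1, n + 1))
%%%         mtbo = cycle_s / p_over if p_over > 0 else float("inf")
%%%         return p_over, mtbo
%%%
%%%     # 32 lines, 10% chance of a character per line per 1 ms
%%%     # cycle, capacity of 8 characters per cycle:
%%%     p, t = overrun_stats(32, 0.10, 8, 1e-3)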

@Article{Laurmaa:1982:AHT,
  author =       "Timo Laurmaa and Markku Syrj{\"a}nen",
  title =        "{APL} and {Halstead}'s theory: a measuring tool and
                 some experiments",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "32--47",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807794",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We have designed and implemented an algorithm which
                 measures APL-programs in the sense of software science
                 by M. H. Halstead /1/. The reader is assumed to be
                 familiar with the theories of software science. Our
                 purpose has been to find the best possible algorithm to
                 automatically analyse large quantities of APL-programs.
                 We have also used our measuring tool to make some
                 experiments to find out if APL-programs and workspaces
                 obey the laws of software science or not. Because our
                 purpose was to analyse large quantities, i.e., hundreds
                 of programs, we have not implemented an algorithm that
                 gives exactly correct results from the software science
                 point of view, because this would necessitate manual
                 clues to the analysing algorithm and thus an
                 interactive mode of analysis. Instead, we have striven
                 for a tool that carries out the analysis automatically
                 and as correctly as possible. In the next
                 section some difficulties encountered in the design of
                 the measuring algorithm and some inherent limitations
                 of it are discussed. Section 3 summarises the sources
                 of errors in the analysis carried out by our algorithm,
                 while section 4 gives a more detailed description of
                 the way analysis is carried out. The remaining sections
                 of this paper report on some experiments we have
                 carried out using our measuring tool. The purpose of
                 these experiments has been to evaluate the explanatory
                 power of Halstead's theory in connection with
                 APL-programs. However, no attempt has been made to
                 process the results of the experiments statistically.
                 The results of the experiments have been treated here
                 only when `obvious' (in)compatibilities between the
                 theory and the results have been observed. Possible
                 reasons for the (in)compatibilities are also pointed
                 out.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
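
%%% The core of such a measuring tool is a classifier that assigns
%%% each token to the operator or operand vocabulary and tallies
%%% occurrences.  A language-neutral Python sketch; the token set and
%%% classification rule are stand-ins for the paper's APL-specific
%%% rules:
%%%
%%%     from collections import Counter
%%%
%%%     OPERATORS = {"+", "-", "*", "/", "<-", "(", ")"}  # stand-in
%%%
%%%     def count_tokens(tokens):
%%%         """Tally distinct and total operators/operands from a
%%%         token stream, returning (n1, n2, N1, N2)."""
%%%         ops = Counter(t for t in tokens if t in OPERATORS)
%%%         opnds = Counter(t for t in tokens if t not in OPERATORS)
%%%         return (len(ops), len(opnds),
%%%                 sum(ops.values()), sum(opnds.values()))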

@Article{Beser:1982:FES,
  author =       "Nicholas Beser",
  title =        "Foundations and experiments in software science",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "48--72",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800201.807795",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A number of papers have appeared on the subject of
                 software science; claiming the existence of laws
                 relating the size of a program and the number of
                 operands and operators used. The pre-eminent theory was
                 developed by Halstead in 1972. The thesis work focuses
                 on the examination of Halstead's theory; with an
                 emphasis on his fundamental assumptions. In particular,
                 the length estimator was analyzed to determine why it
                 yields such a high variance; the theoretical
                 foundations of software science have been extended to
                 improve the applicability of the critical length
                 estimator. This elaboration of the basic theory will
                 result in guidelines for the creation of counting rules
                 applicable to specific classes of programs, so that it
                 is possible to determine both when and how software
                 science can be applied in practice.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Schnurer:1982:PAP,
  author =       "Karl Ernst Schnurer",
  title =        "{Product Assurance Program Analyzer} ({P.A.P.A.}) a
                 tool for program complexity evaluation",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "73--74",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807796",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This tool has been developed to assist in the software
                 validation process. P.A.P.A. will measure the
                 complexity of programs and detect several program
                 anomalies. The resulting list of analyzed programs is
                 sorted in order of descending complexity. Since high
                 complexity and error-proneness are strongly related,
                 the ``critical'' programs will be found earlier within
                 the development cycle. P.A.P.A. provides syntax
                 analyzers for RPG (II/III), PSEUDOCODE (design and
                 documentation language) and PL/SIII (without macro
                 language). It may be applied during the design, coding,
                 and test phases of software development (e.g., for
                 design and code inspections).",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gross:1982:CME,
  author =       "David R. Gross and Mary A. King and Michael R. Murr
                 and Michael R. Eddy",
  title =        "Complexity measurement of {Electronic Switching System
                 (ESS)} software",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "75--85",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807797",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We have been developing a tool that measures the
                 complexity of software: (1) to predict the quality of
                 software products and (2) to allocate proportionally
                 more testing resources to complex modules. The software
                 being measured is real-time and controls telephone
                 switching systems. This software system is large and
                 its development is distributed over a period of several
                 years, with each release providing enhancements and bug
                 fixes. We have developed a two-stage tool consisting of
                 a parser and an analyzer. The parser operates on the
                 source code and produces operator, operand, and
                 miscellaneous tables. These tables are then processed
                 by an analyzer program that calculates the complexity
                 measures. Changes for tuning our Halstead counting
                 rules involve simple changes to the analyzer only.
                 During the development there were problems and issues
                 to be confronted dealing with static analysis and code
                 metrics. These are also described in this paper. In
                 several systems we found that more than 80\% of
                 software failures can be traced to only 20\% of the
                 modules in the system. The McCabe complexity and some
                 of Halstead's metrics correlate more strongly with
                 field failures than does the count of executable
                 statements. It is reasonable to expect that we could
                 devote more effort to the review and test of
                 high-complexity modules and increase the quality of the
                 software product that we send to the field.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
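
%%% The McCabe complexity mentioned above is computed from a module's
%%% control-flow graph as V(G) = E - N + 2P (edges, nodes, connected
%%% components); for a single-entry, single-exit module it equals the
%%% number of binary decision predicates plus one.  A small sketch:
%%%
%%%     def cyclomatic(edges, nodes, components=1):
%%%         """McCabe cyclomatic complexity V(G) = E - N + 2P."""
%%%         return edges - nodes + 2 * components
%%%
%%%     def cyclomatic_from_decisions(n_decisions):
%%%         """Shortcut for one structured module."""
%%%         return n_decisions + 1
%%%
%%%     # A module with 3 binary decisions, 7 nodes, 9 edges:
%%%     assert cyclomatic(9, 7) == cyclomatic_from_decisions(3) == 4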

@Article{Hartman:1982:CTR,
  author =       "Sandra D. Hartman",
  title =        "A counting tool for {RPG}",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "86--100",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807798",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The Halstead and McCabe metrics were evaluated for
                 their usefulness in identifying RPG II and RPG III
                 modules likely to contain a high number of errors. For
                 this evaluation, commercially available RPG modules
                 written within IBM were measured and assigned to low,
                 medium, or high metric value ranges. Conclusions from
                 this evaluation and RPG counting rules that were
                 concomitantly developed were presented at SCORE82 and
                 are summarized in the following report.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Naib:1982:ASS,
  author =       "Farid A. Naib",
  title =        "An application of software science to the quantitative
                 measurement of code quality",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "3",
  pages =        "101--128",
  month =        "Fall",
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1010675.807799",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:17 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The error rate of a software application may function
                 as a measure of code quality. A methodology has been
                 developed which allows for the accurate prediction of
                 the error rate and hence code quality prior to an
                 application's release. Many factors were considered
                 which could conceivably be related to the error rate.
                 These factors were divided into two categories: those
                 factors which vary with time, and those factors which
                 do not vary with time. Factors which vary with time
                 were termed environmental factors and included such
                 items as: number of users, errors submitted to date,
                 etc. Factors which do not vary with time were termed
                 internal factors and included Halstead metrics, McCabe
                 metrics and lines of code.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
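
%%% The paper's prediction methodology is not reproduced here; as a
%%% generic illustration only, internal factors (metrics) and
%%% environmental factors can be combined in an ordinary
%%% least-squares fit against observed error rates.  All numbers
%%% below are hypothetical:
%%%
%%%     import numpy as np
%%%
%%%     # Hypothetical rows = past releases; columns = [Halstead
%%%     # volume, McCabe complexity, lines of code].
%%%     X = np.array([[1200.0, 14, 300],
%%%                   [2100.0, 22, 510],
%%%                   [ 800.0,  9, 190],
%%%                   [1500.0, 17, 350],
%%%                   [ 600.0,  7, 140]])
%%%     y = np.array([3.1, 5.8, 2.0, 3.9, 1.4])   # error rates
%%%
%%%     A = np.hstack([X, np.ones((len(X), 1))])  # add intercept
%%%     coef, *_ = np.linalg.lstsq(A, y, rcond=None)
%%%     predict = lambda f: float(np.append(f, 1.0) @ coef)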

@Article{Blake:1982:OCT,
  author =       "Russ Blake",
  title =        "Optimal control of thrashing",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "1--10",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035295",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The method of discrete optimal control is applied to
                 control thrashing in a virtual memory. Certain
                 difficulties with several previous approaches are
                 discussed. The mechanism of optimal control is
                 presented as an effective, inexpensive alternative. A
                 simple, ideal policy is devised to illustrate the
                 method. A new feedback parameter, the thrashing level,
                 is found to be a positive and robust indicator of
                 thrashing. When applied to a real system, the idealized
                 policy effectively controlled the virtual memory.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
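
%%% The control structure described above can be sketched as a load
%%% controller that samples a thrashing indicator each interval and
%%% adjusts the multiprogramming level.  The setpoint and step sizes
%%% are invented, and the paper's definition of the thrashing level
%%% is not reproduced:
%%%
%%%     def adjust_mpl(mpl, thrashing_level, setpoint=0.1,
%%%                    mpl_min=1, mpl_max=50):
%%%         """One feedback step: shrink the multiprogramming level
%%%         when the indicator exceeds the setpoint, otherwise grow
%%%         it cautiously."""
%%%         if thrashing_level > setpoint:
%%%             return max(mpl_min, mpl - 1)  # relieve memory pressure
%%%         return min(mpl_max, mpl + 1)      # admit more work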

@Article{Babaoglu:1982:HRD,
  author =       "{\"O}zalp Babao{\u{g}}lu",
  title =        "Hierarchical replacement decisions in hierarchical
                 stores",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "11--19",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035296",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "One of the primary motivations for implementing
                 virtual memory is its ability to automatically manage a
                 hierarchy of storage systems with different
                 characteristics. The composite system behaves as if it
                 were a single-level system having the more desirable
                 characteristics of each of its constituent levels. In
                 this paper we extend the virtual memory concept to
                 within each of the levels of the hierarchy. Each level
                 is thought of as containing two additional levels
                 within it. This hierarchy is not a physical one, but
                 rather an artificial one arising from the employment of
                 two different replacement algorithms. Given two
                 replacement algorithms, one of which has good
                 performance but high implementation cost and the other
                 poor performance but low implementation cost, we
                 propose and analyze schemes that result in an overall
                 algorithm having the performance characteristics of the
                 former and the cost characteristics of the latter. We
                 discuss the suitability of such schemes in the
                 management of storage hierarchies that lack page
                 reference bits.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
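
%%% The layering idea can be sketched as a small, exactly maintained
%%% ``top'' partition in front of a cheaply managed ``bottom''
%%% partition within one level: demotion goes from top to bottom, and
%%% eviction from the level happens at the bottom.  The policies
%%% (LRU over FIFO) and sizes are illustrative, not the paper's
%%% specific schemes:
%%%
%%%     from collections import OrderedDict, deque
%%%
%%%     class TwoLevelFrame:
%%%         def __init__(self, top_size, bottom_size):
%%%             self.top = OrderedDict()  # LRU order (costly, accurate)
%%%             self.bottom = deque()     # FIFO order (cheap)
%%%             self.top_size, self.bottom_size = top_size, bottom_size
%%%
%%%         def reference(self, page):
%%%             if page in self.top:
%%%                 self.top.move_to_end(page)   # LRU hit
%%%                 return
%%%             if page in self.bottom:
%%%                 self.bottom.remove(page)     # promote on hit
%%%             self._insert_top(page)
%%%
%%%         def _insert_top(self, page):
%%%             self.top[page] = None
%%%             if len(self.top) > self.top_size:
%%%                 victim, _ = self.top.popitem(last=False)
%%%                 self.bottom.append(victim)   # demote LRU victim
%%%                 if len(self.bottom) > self.bottom_size:
%%%                     self.bottom.popleft()    # evict from level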

@Article{Hagmann:1982:PPR,
  author =       "Robert B. Hagmann and Robert S. Fabry",
  title =        "Program page reference patterns",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "20--29",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035298",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes a set of measurements of the
                 memory reference patterns of some programs. The
                 technique used to obtain these measurements is
                 unusually efficient. The data is presented in graphical
                 form to allow the reader to `see' how the program uses
                 memory. Constant use of a page and sequential access of
                 memory are easily observed. An attempt is made to
                 classify the programs based on their referencing
                 behavior. From this analysis it is hoped that the
                 reader will gain some insights as to the effectiveness
                 of various memory management policies.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bunt:1982:EMP,
  author =       "R. B. Bunt and R. S. Harbus and S. J. Plumb",
  title =        "The effective management of paging storage
                 hierarchies",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "30--38",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035299",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The use of storage hierarchies in the implementation
                 of a paging system is investigated. Alternative
                 approaches for managing a paging storage hierarchy are
                 described and two are selected for further study ---
                 staging and migration. Characteristic behaviour is
                 determined for each of these approaches and a series of
                 simulation experiments is conducted (using program
                 reference strings as data) for the purpose of comparing
                 them. The results clearly show migration to be a
                 superior approach from the point of view of both cost
                 and performance. Conclusions are drawn on the
                 effectiveness of each approach in practice.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hodges:1982:WCP,
  author =       "Larry F. Hodges and William J. Stewart",
  title =        "Workload characterization and performance evaluation
                 in a research environment",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "39--50",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035301",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes the process of bench-marking the
                 diverse research environment that constitutes the
                 workload of VAX/VMS at the University Analysis and
                 Control Center at North Carolina State University. The
                 benchmarking process began with a study of the system
                 load and performance characteristics over the six-month
                 period from January to June of 1981. Statistics were
                 compiled on the number of active users, CPU usage by
                 individual accounts, and peak load periods. Individual
                 users were interviewed to determine the nature and
                 major computing characteristics of the research they
                  were conducting on the VAX. Information from all
                  sources
                 was compiled to produce a benchmark that closely
                 paralleled actual system activity.\par

                 An analytic model was introduced and used in
                 conjunction with the benchmark data and hardware
                 characteristics to derive performance measures for the
                 system. Comparisons with measured system performance
                 were conducted to demonstrate the accuracy of the
                 model. The model was then employed to predict
                 performance as the system workload was increased, to
                 suggest improvements for the system, and to examine the
                 effects of those improvements.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Haring:1982:SDW,
  author =       "G{\"u}nter Haring",
  title =        "On state-dependent workload characterization by
                 software resources",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "51--57",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035302",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A method for the characterization of computer workload
                  at the task level is presented. After the workload has
                  been divided into classes using a clustering
                  technique, each cluster is further analysed by means
                  of state-dependent transition matrices. This makes it
                  possible to
                 derive the most probable task sequences in each
                 cluster. This information can be used to construct
                 synthetic scripts at the task level rather than the
                 usual description at the hardware resource level.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bolzoni:1982:PIS,
  author =       "M. L. Bolzoni and M. C. Calzarossa and P. Mapelli and
                 G. Serazzi",
  title =        "A package for the implementation of static workload
                 models",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "58--67",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035303",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The general principles for constructing workload
                 models are reviewed. The differences between static and
                 dynamic workload models are introduced and the
                 importance of the classification phase for the
                 implementation of both types of workload models is
                 pointed out. All the operations required for
                  constructing static workload models have been
                  integrated into a package. Its main properties and
                  fields of
                 application are presented. The results of an
                 experimental study performed with the package on a
                 batch and interactive workload show its ease of use and
                 the accuracy of the model obtained.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{McDaniel:1982:MSI,
  author =       "Gene McDaniel",
  title =        "The {Mesa Spy}: an interactive tool for performance
                 debugging",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "68--76",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035305",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The Spy is a performance evaluation tool for the Mesa
                 environment that uses a new extension to the PC
                 sampling technique. The data collection process can use
                 information in the run time call stack to determine
                 what code is responsible for the resources being
                 consumed. The Spy avoids perturbing the user
                 environment when it executes, provides symbolic output
                 at the source-language level, and can be used without
                 recompiling the program to be examined. Depending upon
                 how much complication the user asks for during data
                 collection, the Spy steals between 0.3\% and 1.8\% of
                 the cycles of a fast machine, and between 1.08\% and
                 35.9\% of the cycles on a slow machine.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "high level language performance debugging; pc
                 sampling; performance analysis",
}
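
%%% The Spy's PC-sampling approach translates readily to modern
%%% settings. Below is a minimal sketch in Python (an illustration,
%%% not the Mesa implementation): a SIGPROF interval timer interrupts
%%% the running program periodically, and the handler charges one
%%% sample to the interrupted function and line (Unix only).
%%%
%%%    import collections
%%%    import signal
%%%
%%%    samples = collections.Counter()
%%%
%%%    def handler(signum, frame):
%%%        # Charge the sample to the code being executed.
%%%        samples[(frame.f_code.co_name, frame.f_lineno)] += 1
%%%
%%%    signal.signal(signal.SIGPROF, handler)
%%%    # Sample every 10 ms of consumed CPU time.
%%%    signal.setitimer(signal.ITIMER_PROF, 0.01, 0.01)
%%%
%%%    def busy():
%%%        return sum(i * i for i in range(2_000_000))
%%%
%%%    busy()
%%%    signal.setitimer(signal.ITIMER_PROF, 0)   # stop sampling
%%%    for (name, line), n in samples.most_common(5):
%%%        print(name, line, n)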

@Article{Hercksen:1982:MSE,
  author =       "Uwe Hercksen and Rainer Klar and Wolfgang
                 Klein{\"o}der and Franz Knei{\ss}l",
  title =        "Measuring simultaneous events in a multiprocessor
                 system",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "77--88",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035306",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In the hierarchically organized multiprocessor system
                 EGPA, which has the structure of a pyramid, the
                 performance of concurrent programs is studied. These
                 studies are assisted by a hardware monitor
                 (Z{\"A}HLMONITOR III), which measures not only the
                 activity and idle states of CPU and channels, but
                 records the complete history of processes in the CPU
                 and interleaved I/O activities. The applied method is
                 distinguished from usual hardware measurements for two
                 reasons: it puts together the a priori independent
                 event-streams coming from the different processors to a
                 well ordered single event stream and it records not
                 only hardware but also software events. Most useful
                 have been traces of software events, which give the
                 programmer insight into the dynamic cooperation of
                 distributed subtasks of his program. This paper
                 describes the measurement method and its application to
                 the analysis of the behaviour of a highly asynchronous
                 parallel algorithm: the projection of contour lines
                 from a given point of view and the elimination of
                 hidden lines.\par

                 This work is sponsored by the Bundesminister f{\"u}r
                 Forschung und Technologie (German Federal Minister of
                 Research and Technology).",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
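
%%% The core operation described above, combining per-processor event
%%% streams into a single well-ordered stream, is a k-way merge. A
%%% minimal software sketch in Python with hypothetical event data
%%% (the monitor performs this step in hardware):
%%%
%%%    import heapq
%%%
%%%    # (timestamp, source, event) tuples; each stream is time-sorted.
%%%    cpu0 = [(1.0, "cpu0", "task-start"), (4.0, "cpu0", "io-issue")]
%%%    cpu1 = [(2.5, "cpu1", "task-start"), (3.0, "cpu1", "task-end")]
%%%
%%%    for event in heapq.merge(cpu0, cpu1):
%%%        print(event)     # one well-ordered stream of all events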

@Article{Gelenbe:1982:SDF,
  author =       "Erol Gelenbe",
  title =        "Stationary deterministic flows in discrete systems:
                 {I}",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "89--101",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035308",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider a deterministic system whose state space
                 is the $n$-dimensional first orthant. It may be
                 considered as a network of (deterministic) queues, a
                  Karp-Miller vector addition system, a Petri net, a
                 complex computer system, etc. Weak assumptions are then
                 made concerning the asymptotic or limiting behaviour of
                 the instants at which events are observed across a cut
                 in the system: these instants may be considered as
                  `arrival' or `departure' instants. Thus, as in
                 operational analysis, we deal with deterministic and
                 observable properties and we need no stochastic
                 assumptions or restrictions (such as independence,
                 identical distributions, etc.).\par

                  We consider, however, asymptotic or stationary
                  properties, as in conventional queueing analysis.
                  Under our assumptions a set of standard theorems is
                  proved, concerning arrival and departure instant
                  measures, `birth and death' type equations, and
                  Little's formula. Our intention is to set
                 the framework for a new approach to performance
                 modelling of computer systems in a context close to
                 that used in actual measurements, but taking into
                 account infinite time behaviour in order to take
                 advantage of the useful mathematical properties of
                 asymptotic results.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
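
%%% In the paper's operational spirit, Little's formula can be checked
%%% directly on recorded arrival and departure instants, with no
%%% stochastic assumptions. A sketch in Python over hypothetical
%%% observed instants on a window [0, T]:
%%%
%%%    arrivals   = [0.0, 1.0, 1.5, 4.0, 6.0]
%%%    departures = [2.0, 3.0, 3.5, 7.0, 8.0]   # job i departs at [i]
%%%    T = 10.0                                 # observation window
%%%
%%%    X = len(departures) / T                  # throughput
%%%    R = sum(d - a for a, d in
%%%            zip(arrivals, departures)) / len(departures)
%%%    L = sum(min(d, T) - a for a, d in
%%%            zip(arrivals, departures)) / T   # time-averaged population
%%%    print(X * R, L)   # both 1.1: L = X * R when all jobs finish by T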

@Article{Baccelli:1982:DBR,
  author =       "F. Baccelli and E. G. Coffman",
  title =        "A data base replication analysis using an {M\slash
                 M\slash m} queue with service interruptions",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "102--107",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035309",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A study of file replication policies for distributed
                  data bases is approached through the analysis of
                 an M/M/m queue subjected to state-independent,
                 preemptive interruptions of service. The durations of
                 periods of interruption constitute a sequence of
                 independent, identically distributed random variables.
                 Independently, the times measured from the termination
                 of one period of interruption to the beginning of the
                 next form a sequence of independent, exponentially
                 distributed random variables. Preempted customers
                 resume service at the terminations of interrupt
                 periods.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
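
%%% For orientation, the uninterrupted M/M/m baseline that the paper
%%% perturbs with preemptive service interruptions. A Python sketch of
%%% the standard Erlang-C quantities (the interruption analysis itself
%%% is the paper's contribution and is not reproduced here):
%%%
%%%    from math import factorial
%%%
%%%    def erlang_c(m, lam, mu):
%%%        # Probability that an arrival must wait in an M/M/m queue.
%%%        a, rho = lam / mu, lam / (m * mu)
%%%        s = sum(a**k / factorial(k) for k in range(m))
%%%        last = a**m / (factorial(m) * (1 - rho))
%%%        return last / (s + last)
%%%
%%%    def mean_response(m, lam, mu):
%%%        # Mean wait C/(m*mu - lam) plus mean service 1/mu.
%%%        return erlang_c(m, lam, mu) / (m * mu - lam) + 1 / mu
%%%
%%%    print(mean_response(m=3, lam=2.0, mu=1.0))   # approx. 1.444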

@Article{Plateau:1982:MPR,
  author =       "Brigitte Plateau and Andreas Staphylopatis",
  title =        "Modelling of the parallel resolution of a numerical
                 problem on a locally distributed computing system",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "108--117",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035310",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Modern VLSI technology has enabled the development of
                 high-speed computing systems, based upon various
                  multiprocessor architectures [1]. We can distinguish
                 several types of such systems, depending on the control
                 policies adopted, the interprocessor communication
                 modes and the degree of resource-sharing. The
                 efficiency of parallel processing may be significant in
                  various areas of computer applications; in particular,
                  large numerical applications, such as the solution of
                  linear systems and differential equations, are marked
                  by the need for high computation speeds. The advance
                  of parallel processing systems thus goes hand in hand
                  with research efforts to develop efficient parallel
                 algorithms [2]. The implementation of parallel
                 algorithms concerns the execution of concurrent
                 processes, assigned to the processors of the system,
                 which communicate with each other. The synchronization
                 needed at process interaction points implies the
                 existence of waiting delays, which constitute the main
                 limiting factor of parallel computation. Several
                  modelling techniques have been developed that allow
                  the prediction and verification of parallel system
                  performance. The two general approaches followed
                  concern deterministic models [3] and probabilistic
                  models. The latter, based on the theory of stochastic
                  processes [5] \ldots{}, are well adapted to the
                  analysis
                 of complex variable phenomena and provide important
                 measures concerning several aspects of parallel
                 processing.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bard:1982:MSD,
  author =       "Yonathan Bard",
  title =        "Modeling {I/O} systems with dynamic path selection,
                 and general transmission networks",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "118--129",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035312",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper examines general transmission networks, of
                 which I/O subsystems are a special case. By using the
                 maximum entropy principle, we answer questions such as
                 what is the probability that a path to a given node is
                 free when that node is ready to transmit. Systems with
                 both dynamic and fixed path selection mechanisms are
                 treated. Approximate methods for large networks are
                 proposed, and numerical examples are given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Lazowska:1982:MCM,
  author =       "Edward D. Lazowska and John Zahorjan",
  title =        "Multiple class memory constrained queueing networks",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "130--140",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035313",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Most computer systems have a memory constraint: a
                 limit on the number of requests that can actively
                 compete for processing resources, imposed by finite
                 memory resources. This characteristic violates the
                 conditions required for queueing network performance
                 models to be separable, i.e., amenable to efficient
                 analysis by standard algorithms. Useful algorithms for
                 analyzing models of memory constrained systems have
                 been devised only for models with a single customer
                 class.\par

                 In this paper we consider the multiple class case. We
                 introduce and evaluate an algorithm for analyzing
                 multiple class queueing networks in which the classes
                 have independent memory constraints. We extend this
                 algorithm to situations in which several classes share
                 a memory constraint. We sketch a generalization to
                 situations in which a subsystem within an overall
                 system model has a population constraint.\par

                 Our algorithm is compatible with the extremely time-
                 and space-efficient iterative approximate solution
                 techniques for separable queueing networks. This level
                 of efficiency is mandatory for modelling large
                 systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "approximate solution technique; computer system
                 performance evaluation; memory constraint; population
                 constraint; queueing network model",
}
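
%%% The approximations above iterate around the Mean Value Analysis
%%% recursion for separable networks. A minimal single-class exact MVA
%%% sketch in Python (the multi-class, memory-constrained extension is
%%% the paper's subject and is not reproduced here):
%%%
%%%    def mva(demands, N, Z=0.0):
%%%        # Exact MVA for a closed network of fixed-rate centers.
%%%        Q = [0.0] * len(demands)      # queue lengths at population 0
%%%        for n in range(1, N + 1):
%%%            # Arrival theorem: demand inflated by the queue seen.
%%%            R = [D * (1.0 + q) for D, q in zip(demands, Q)]
%%%            X = n / (Z + sum(R))      # system throughput
%%%            Q = [X * r for r in R]    # Little's law per center
%%%        return X, sum(R)
%%%
%%%    X, R = mva([0.10, 0.05, 0.04], N=20, Z=2.0)
%%%    print(X, R)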

@Article{Brandwajn:1982:FAS,
  author =       "Alexandre Brandwajn",
  title =        "Fast approximate solution of multiprogramming models",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "141--149",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035332.1035314",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Queueing network models of computer systems with
                 multiprogramming constraints generally do not possess a
                 product-form solution in the sense of Jackson.
                 Therefore, one is usually led to consider approximation
                 techniques when dealing with such models. Equivalence
                 and decomposition is one way of approaching their
                 solution. With multiple job classes, the equivalent
                 network may be viewed as a set of interdependent
                 queues. In general, the state-dependence in this
                 equivalent network precludes a product-form solution,
                 and the size of its state space grows rapidly with the
                 number of classes and of jobs per class. This paper
                 presents two methods for approximate solution of the
                 equivalent state-dependent queueing network. The first
                 approach is a manifold application of equivalence and
                 decomposition. The second approach, less accurate than
                 the first one, is a fast-converging iteration whose
                 computational complexity grows near-linearly with the
                 number of job classes and jobs in a class. Numerical
                 examples illustrate the accuracy of the two methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "approximate solutions; equivalence and decomposition;
                 multiprogramming; queueing network models; simultaneous
                 resource possession",
}

@Article{Agrawal:1982:ASM,
  author =       "Subhash C. Agrawal and Jeffrey P. Buzen",
  title =        "The aggregate server method for analyzing
                 serialization delays in computer systems",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "150--150",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035316",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The aggregate server method is an approximate,
                 iterative technique for analyzing the delays programs
                 encounter while waiting for entry into critical
                 sections, non-reentrant subroutines, and similar
                 software structures that cause processing to become
                 serialized. The method employs a conventional product
                 form queueing network comprised of servers that
                 represent actual I/O devices and processors, plus
                 additional aggregate servers that represent serialized
                 processing activity. The parameters of the product form
                 network are adjusted iteratively to account for
                 contention among serialized and non-serialized
                 customers at each physical device.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Smith:1982:PAS,
  author =       "Connie U. Smith and David D. Loendorf",
  title =        "Performance analysis of software for an {MIMD}
                 computer",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "151--162",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035317",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper presents a technique for modeling and
                 analyzing the performance of software for an MIMD
                 (Multiple Instruction Multiple Data) computer. The
                 models can be used as an alternative to experimentation
                 for the evaluation of various algorithms and different
                 degrees of parallelism. They can also be used to study
                 the tradeoffs involved in increasing the amount of
                 parallel computation at the expense of increased
                 overhead for synchronization and communication. The
                 detection and alleviation of performance bottlenecks is
                 facilitated.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Agre:1982:MRN,
  author =       "Jon R. Agre and Satish K. Tripathi",
  title =        "Modeling reentrant and nonreentrant software",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "163--178",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035318",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A description of software module models for computer
                 systems is presented. The software module models are
                 based on a two level description, the software level
                 and the hardware level, of the computer system. In the
                 software module level it is possible to model
                 performance effects of software traits such as
                 reentrant and nonreentrant type software modules. The
                 resulting queueing network models are, in general, not
                 of the product form class and approximation schemes are
                 employed as solution techniques.\par

                 An example of a software module model of a hypothetical
                 computer system is presented. The model is solved with
                 a simulation program and three approximation schemes.
                 The approximation results were compared with the
                  simulation results, and some schemes were found to
                 produce good estimates of the effects of changing from
                 reentrant to non-reentrant software modules.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Wu:1982:OME,
  author =       "L. T. Wu",
  title =        "Operational models for the evaluation of degradable
                 computing systems",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "179--185",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035319",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Recent advances in multiprocessor technology have
                 established the need for unified methods to evaluate
                  computing system performance and reliability. In
                 response to this modeling need, this paper considers a
                 general modeling framework which permits the modeling,
                 analysis and evaluation of degradable computing
                 systems. Within this framework, a simple and useful
                 user-oriented performance variable is identified and
                 shown to be a proper generalization of the traditional
                 notions of system performance and reliability.\par

                 The modeling and evaluation methods considered in this
                 paper provide a relatively straightforward approach for
                 integrating reliability and availability measures with
                 performance measures. The hierarchical decomposition
                 approach permits the modeling and evaluation of a
                 computing system's subsystems (e.g., hardware,
                 software, peripherals, interfaces, user demand systems)
                 as a whole rather than the traditional methods of
                 evaluating these subsystems independently. Accordingly,
                 it becomes possible to evaluate the performance of the
                 system software and the reliability of the system
                 hardware simultaneously in order to measure the
                 effectiveness of the system design. Since the
                 performance variable introduced permits the
                 characterization of the system performance according to
                 the user's view of the systems, the results obtained
                 represent more accurate assessments of the system's
                 ability to perform than the existing performance or
                 reliability measures.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Marie:1982:ECA,
  author =       "Raymond A. Marie and Patricia M. Snyder and William J.
                 Stewart",
  title =        "Extensions and computational aspects of an iterative
                 method",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "186--194",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035321",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The so-called iterative methods are among a class of
                 methods that have recently been applied to obtain
                 approximate solutions to general queueing networks. In
                 this paper it is shown that if the network contains
                 feedback loops, then it is more advantageous to
                 incorporate these loops into the analysis of the
                 station itself rather than into the analysis of the
                 complement of the station. We show how this analysis
                 may be performed for a simple two-phase Coxian server.
                 Additionally, it is shown that the number of iterations
                 required to achieve a specified degree of accuracy may
                 be considerably reduced by using a continuous updating
                 procedure in which the computed throughputs are
                 incorporated as soon as they are available, rather than
                 at the end of an iteration. An efficient computational
                 scheme is presented to accompany this continuous
                 updating. Finally a number of examples are provided to
                 illustrate these features.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Neuse:1982:HHA,
  author =       "Doug Neuse and K. Mani Chandy",
  title =        "{HAM}: the heuristic aggregation method for solving
                 general closed queueing network models of computer
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "195--212",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035322",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An approximate analytical method for estimating
                 performance statistics of general closed queueing
                 network models of computing systems is presented. These
                 networks may include queues with priority scheduling
                 disciplines and non-exponential servers and several
                 classes of jobs. The method is based on the aggregation
                 theorem (Norton's theorem) of Chandy, Herzog and Woo.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "aggregation theorem; analytical models;
                 approximations; computer system models; general closed
                 queueing networks; non-local-balance; non-product-form;
                 performance analysis; priority scheduling",
}

@Article{Eager:1982:PBH,
  author =       "D. L. Eager and K. C. Sevcik",
  title =        "Performance bound hierarchies for queueing networks",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "213--214",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035324",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In applications of queueing network models to computer
                 system performance prediction, the computational effort
                 required to obtain an exact equilibrium solution of a
                 model may not be justified by the accuracy actually
                 required. In these cases, there is a need for
                 approximation or bounding techniques that can provide
                 the necessary information at reduced cost. This paper
                 presents Performance Bound Hierarchies (PBHs) for
                 single class separable queueing networks consisting of
                 fixed rate and delay service centers. A PBH consists of
                 a hierarchy of upper (pessimistic) or lower
                 (optimistic) bounds on mean system residence time. (The
                 bounds can also be expressed as bounds on system
                 throughput or center utilizations.) Each successive
                  member of the hierarchy requires more computational
                  effort, and in the limit the bounds converge to the
                  exact solution.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
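
%%% The classical asymptotic bounds give the flavour of the cheapest
%%% members of such a hierarchy. A sketch for a single-class network
%%% with per-center demands, population N, and think time Z (the
%%% hierarchy's tighter members are the paper's subject):
%%%
%%%    def throughput_bounds(demands, N, Z=0.0):
%%%        D, Dmax = sum(demands), max(demands)
%%%        upper = min(N / (D + Z), 1.0 / Dmax)   # optimistic
%%%        lower = N / (N * D + Z)                # pessimistic
%%%        return lower, upper
%%%
%%%    print(throughput_bounds([0.10, 0.05, 0.04], N=20, Z=2.0))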

@Article{Brumfield:1982:EAH,
  author =       "Jeffrey A. Brumfield and Peter J. Denning",
  title =        "Error analysis of homogeneous mean queue and response
                 time estimators",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "215--221",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035325",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Flow balance and homogeneity assumptions are needed to
                 derive operational counterparts of M/M/1 queue length
                 and response time formulas. This paper presents
                 relationships between the assumption errors and the
                 errors in the queue length and response time estimates.
                 A simpler set of assumption error measures is used to
                 derive bounds on the error in the response time
                 estimate. An empirical study compares actual errors
                 with their bounds.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
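
%%% The M/M/1 relations whose operational counterparts the paper
%%% studies, as a one-screen Python sketch: utilization U = X*S, mean
%%% queue length n = U/(1-U), and response time R = S/(1-U), so that
%%% n = X*R by Little's formula:
%%%
%%%    def mm1(X, S):
%%%        U = X * S              # utilization law
%%%        n = U / (1.0 - U)      # mean number in system
%%%        R = S / (1.0 - U)      # mean response time
%%%        return U, n, R
%%%
%%%    print(mm1(X=0.8, S=1.0))   # U = 0.8, n = 4.0, R = 5.0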

@Article{Harbitter:1982:MTL,
  author =       "Alan Harbitter and Satish K. Tripathi",
  title =        "A model of transport level flow control",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "222--232",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035327",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A Markov Decision Process model is developed to
                 analyze buffer assignment at the transport level of the
                 ARPAnet protocol. The result of the analysis is a
                 method for obtaining an assignment policy which is
                 optimal with respect to a delay/throughput/overhead
                 reward function. The nature of the optimal policy is
                 investigated by varying parameters of the reward.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gelenbe:1982:CPC,
  author =       "Erol Gelenbe and Isi Mitrani",
  title =        "Control policies in {CSMA} local area networks:
                 {Ethernet} controls",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "233--240",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035328",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An analysis of the random carrier sense multiple
                 access channel is presented in terms of the behaviour
                 of each participating station. A detailed model of the
                 station protocol, including the control policy used in
                  case of collisions, is used to derive the traffic and
                 throughput of each station. The channel traffic
                 characteristics are derived from this model and used,
                 in turn, to derive the traffic parameters entering into
                 the station model. This provides a solution method for
                 complete system characteristics for a finite
                 prespecified set of stations. The approach is then used
                 to analyse control policies of the type used in
                 ETHERNET. We show, in particular, that as the
                 propagation delay becomes small, the specific form of
                 the control policy tends to have a marginal effect on
                 network performance. The approach also applies to the
                 DANUBE and XANTHOS networks.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Tripathi:1982:ATF,
  author =       "Satish K. Tripathi and Alan Harbitter",
  title =        "An analysis of two flow control techniques",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "241--249",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035329",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Queuing models can be useful tools in comparing the
                 performance characteristics of different flow control
                 techniques. In this paper the window control mechanism,
                  incorporated in protocols such as X.25, is compared to
                 the ARPAnet buffer reservation scheme. Multiclass
                 queuing models are used to examine message throughput
                 and delay characteristics. The analysis highlights the
                 interaction of long and short message (in terms of
                 length in packets) transmitters under the two flow
                 control techniques.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{King:1982:MCR,
  author =       "P. J. B. King and I. Mitrani",
  title =        "Modelling the {Cambridge Ring}",
  journal =      j-SIGMETRICS,
  volume =       "11",
  number =       "4",
  pages =        "250--258",
  month =        dec,
  year =         "1982",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1035293.1035330",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 10:59:37 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Models for the local area computer network known as
                 the Cambridge Ring are developed and evaluated. Two
                 different levels of protocol are considered: the
                 hardware and the Basic Block. These require different
                 approaches and, in the second case, an approximate
                 solution method. A limited comparison between the
                 Cambridge Ring and another ring architecture --- the
                 token ring --- is carried out.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Marrevee:1982:PRT,
  author =       "J. Marrevee",
  title =        "The power of the read track and the need for a write
                 track command for disk back-up and restore utilities",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "1",
  pages =        "10--14",
  month =        dec,
  year =         "1982/1983",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041865",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:33 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computer performance analysis, whether it be for
                 design, selection or improvement, has a large body of
                 literature to draw upon. It is surprising, however,
                 that few texts exist on the subject. The purpose of
                 this paper is to provide a feature analysis of the four
                 major texts suitable for professional and academic
                 purposes.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "computer performance evaluation; computer system
                 selection",
}

@Article{Perros:1982:MPR,
  author =       "H. G. Perros",
  title =        "A model for predicting the response time of an on-line
                 system for electronic fund transfer",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "1",
  pages =        "15--21",
  month =        "Winter",
  year =         "1982/1983",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041866",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:33 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A workshop on the theory and application of analytical
                 models to ADP system performance prediction was held on
                 March 12-13, 1979, at the University of Maryland. The
                 final agenda of the workshop is included as an
                 appendix. Six sessions were conducted: (1) theoretical
                 advances, (2) operational analysis, (3) effectiveness
                 of analytical modeling techniques, (4) validation, (5)
                 case studies and applications, and (6) modeling tools.
                 A summary of each session is presented below. A list of
                 references is provided for more detailed information.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Augustin:1982:CCD,
  author =       "Reinhard Augustin and Klaus-J{\"u}rgen B{\"u}scher",
  title =        "Characteristics of the {COX}-distribution",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "1",
  pages =        "22--32",
  month =        dec,
  year =         "1982/1983",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041867",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:33 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The concept of a `working-set' of a program running in
                 a virtual memory environment is now so familiar that
                 many of us fail to realize just how little we really
                 know about what it is, what it means, and what can be
                 done to make such knowledge actually useful. This
                 follows, perhaps, from the abstract and apparently
                 intangible facade that tends to obscure the meaning of
                 working set. What we cannot measure often ranks high in
                 curiosity value, but ranks low in pragmatic utility.
                 Where we have measures, as in the page-seconds of
                 SMF/MVS, the situation becomes even more curious: here
                 a single number purports to tell us something about the
                 working set of a program, and maybe something about the
                 working sets of other concurrent programs, but not very
                 much about either. This paper describes a case in which
                 the concept of the elusive working set has been
                 encountered in practice, has been intensively analyzed,
                 and finally, has been confronted in its own realm. It
                 has been trapped, wrapped, and, at last, forced to
                 reveal itself for what it really is. It is not a
                 number! Yet it can be measured. And what it is,
                 together with its measures, turns out to be something
                 not only high in curiosity value, but also something
                 very useful as a means to predict the page faulting
                 behavior of a program running in a relatively complex
                 multiprogrammed environment. The information presented
                 here relates to experience gained during the conversion
                 of a discrete event simulation model to a hybrid model
                 which employs analytical techniques to forecast the
                 duration of `steady-state' intervals between mix-change
                 events in the simulation of a network-scheduled job
                 stream processing on a 370/168-3AP under MVS. The
                 specific `encounter' with the concept of working sets
                 came about when an analytical treatment of program
                 paging was incorporated into the model. As a result of
                 considerable luck, ingenuity, and brute-force
                 empiricism, the model won. Several examples of
                 empirically derived characteristic working set
                 functions, together with typical model results, are
                 supported with a discussion of relevant modeling
                 techniques and areas of application.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Perros:1984:QNB,
  author =       "H. G. Perros",
  title =        "Queueing networks with blocking: a bibliography",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "2",
  pages =        "8--12",
  month =        "Spring-Summer",
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041823.1041824",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:34 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In recent years, queueing networks with blocking have
                 been studied by researchers from various research
                 communities such as Computer Performance Modelling,
                 Operations Research, and Industrial Engineering. In
                 view of this, related results are scattered throughout
                 various journals. The bibliography given below is the
                 result of a first attempt to compile an exhaustive list
                 of related papers in which analytic investigations
                 (exact or approximate) or numerical investigations of
                 queueing networks with blocking have been reported.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{DeMarco:1984:ASS,
  author =       "Tom DeMarco",
  title =        "An algorithm for sizing software products",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "2",
  pages =        "13--22",
  month =        "Spring-Summer",
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041823.1041825",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:34 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper reports on efforts to develop a cost
                 forecasting scheme based on a Function Metric called
                 System BANG. A Function Metric is a quantifiable
                 indication of system size and complexity derived
                 directly from a formal statement of system requirement.
                 Conclusions from a small sample of projects are
                 presented.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Fishwick:1984:PPG,
  author =       "Paul A. Fishwick and Stefan Feyock",
  title =        "{PROFGEN}: a procedure for generating machine
                 independent high-level language profilers",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "2",
  pages =        "27--31",
  month =        "Spring-Summer",
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041823.1041826",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:34 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Many of the tools used in software metrics for
                 evaluating the execution characteristics of a program
                 are predicated on specific counting rules for operators
                 and operands [1, 2]. The analyst may use these counting
                 techniques to determine such program attributes as
                 estimation of object code size prior to actual
                 compilation and the relative efficiencies of various
                 language compilers. Operator/operand measures provide
                 useful results for certain analyses, but a deficiency
                 exists in that the data derived from this technique
                 does not directly reflect the program structure
                 afforded by a high-level language such as FORTRAN,
                 Pascal, or Ada. There are many instances where it is
                 desirable to measure the program at the source level
                 where the execution data may be directly associated
                 with specific high level program units such as source
                 statements and blocks.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Rajaraman:1984:PML,
  author =       "M. K. Rajaraman",
  title =        "Performance measures for a local network",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "2",
  pages =        "34--37",
  month =        "Spring-Summer",
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041823.1041827",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:34 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Parameters that influence the performance of a local
                 network consisting of three mainframes and an array
                 processor are identified. Performance measures are
                 developed for this network and their significance in
                  the operation and use of the network is discussed.
                 Some aspects of implementing such measures in a local
                 network are examined.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Jones:1984:PEJ,
  author =       "Greg A. Jones",
  title =        "Performance evaluation of a job scheduler",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "2",
  pages =        "38--43",
  month =        "Spring-Summer",
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041823.1041828",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:34 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "International Business Machines' (IBM) Job Entry
                 Subsystem 3 (JES 3) is the integral part of the MVS
                 operating system that is responsible for controlling
                 all jobs from their entry into the system until their
                 exit out of the system. JES 3 maintains total awareness
                 of each job while it is in the system and services the
                 jobs upon request. These services include: preparing
                 the job for execution, selecting the job for execution,
                 and the processing of SYSIN/SYSOUT data. This paper
                 reports the findings of the performance evaluation
                 study of JES 3 through the use of a General Purpose
                 Simulation System (GPSS) model of JES 3 and exhibits
                 the benefits of using simulation models to study
                 complex systems such as JES 3. Once the model was
                 developed, it was used to evaluate the effects of
                 varying the job scheduler parameters of JES 3 in the
                 batch job environment. The input workload and service
                 times for the model were derived from System Management
                 Facilities (SMF) and Resource Management Facilities
                 (RMF) data from the modeled system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Clark:1984:NCP,
  author =       "Jon D. Clark and Thomas C. Richards",
  title =        "A note on the cost-performance ratios of {IBM}'s
                 {43XX} series",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "2",
  pages =        "44--45",
  month =        "Spring-Summer",
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041823.1041829",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:34 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Pricing policies of computers with various performance
                 capabilities are usually assumed to be non-linear due
                 to economies-of-scale. This article analyzes the
                 cost-performance ratios of a single IBM product line,
                 the 43XX series, and finds this characteristic to be
                 surprisingly linear, though with a great deal of
                 individual variation.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "computer; cost-performance; performance evaluation",
}

@Article{Coffman:1984:RPP,
  author =       "E. G. {Coffman, Jr.}",
  title =        "Recent progress in the performance evaluation of
                 fundamental allocation algorithms",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "2--6",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809308",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Our understanding of several allocation algorithms
                 basic to operating systems and to data base systems has
                 improved substantially as a result of a number of
                 research efforts within the past one or two years. The
                 results have stirred considerable excitement in both
                 theorists and practitioners. This is not only because
                 of the inroads made into long-standing problems, but
                 also because of the surprising nature of the results;
                 in particular, we refer to proofs that certain
                 classical algorithms described as approximate are in
                 fact optimal in a strong probabilistic sense. The work
                 discussed here will be classified according to the
                 application areas, archival and dynamic storage
                 allocation. In both cases we are concerned with the
                 packing problems that arise in making efficient use of
                 storage. Equivalents of the archival problems also have
                 importance in scheduling applications [4]; however, we
                 shall focus exclusively on the storage allocation
                 setting.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ferrari:1984:FAW,
  author =       "Domenico Ferrari",
  title =        "On the foundations of artificial workload design",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "8--14",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809309",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The principles on which artificial workload model
                 design is currently based are reviewed. Design methods
                 are found wanting for three main reasons: their
                 resource orientation, with the selection of resources
                 often unrelated to the performance impact of resource
                 demands; their failure to define an accuracy criterion
                 for the resulting workload model; and their ignoring
                 the dynamics of the workload to be modeled. An attempt
                 at establishing conceptual foundations for the design
                 of interactive artificial workloads is described. The
                 problems found in current design methods are taken into
                 account, and sufficient conditions for the
                 applicability of these methods are determined. The
                 study also provides guidance for some of the decisions
                 to be made in workload model design using one of the
                 current methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Perez-Davila:1984:PIF,
  author =       "Alfredo de J. Perez-Davila and Lawrence W. Dowdy",
  title =        "Parameter interdependencies of file placement models
                 in a {Unix} system",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "15--26",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809310",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A file assignment case study of a computer system
                 running Unix is presented. A queueing network model of
                 the system is constructed and validated. A modeling
                 technique for the movement of files between and within
                 disks is proposed. A detailed queueing network model is
                 constructed for several file distributions in secondary
                 storage. The interdependencies between the speed of the
                 CPU, the swapping activity, the visit ratios and the
                 multiprogramming level are examined and included in the
                 modeling technique. The models predict the performance
                 of several possible file assignments. The various file
                 assignments are implemented and comparisons between the
                 predicted and actual performance are made. The models
                 are shown to accurately predict user response time.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bunt:1984:MPL,
  author =       "Richard B. Bunt and Jennifer M. Murphy and Shikharesh
                 Majumdar",
  title =        "A measure of program locality and its application",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "28--40",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809311",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Although the phenomenon of locality has long been
                 recognized as the single most important characteristic
                 of program behaviour, relatively little work has been
                 done in attempting to measure it. Recent work has led
                 to the development of an intrinsic measure of program
                 locality based on the Bradford--Zipf distribution.
                 Potential applications for such a measure are many, and
                 include the evaluation of program restructuring methods
                 (manual and automatic), the prediction of system
                 performance, the validation of program behaviour
                 models, and the enhanced understanding of the phenomena
                 that characterize program behaviour. A consideration of
                 each of these areas is given in connection with the
                 proposed measure, both to increase confidence in the
                 validity of the measure and to illustrate a methodology
                 for dealing with such problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Krzesinski:1984:ILM,
  author =       "A. Krzesinski and J. Greyling",
  title =        "Improved lineariser methods for queueing networks with
                 queue dependent centres",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "41--51",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809312",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The Lineariser is an MVA-based technique developed for
                 the approximate solution of large multiclass product
                 form queueing networks. The Lineariser is capable of
                 computing accurate solutions for networks of fixed rate
                 centres. However, problems arise when the Lineariser is
                 applied to networks containing centres with queue
                 dependent service rates. Thus networks exist which seem
                 well suited for Lineariser solution (a large number of
                 lightly loaded centres, large numbers of customers in
                 each closed chain) but whose queue dependent centres
                 cannot be solved accurately by the Lineariser method.
                 Examples have also been found where the Lineariser
                 computes accurate values for the queue lengths, waiting
                 times and throughputs though the values computed for
                 the queue length distributions are totally in error.
                 This paper presents an Improved Lineariser which
                 computes accurate approximate solutions for multiclass
                 networks containing an arbitrary number of queue
                 dependent centres. The Improved Lineariser is based on
                 MVA results and is therefore simple to implement and
                 numerically well behaved. The Improved Lineariser has
                 storage and computation requirements of order $ M N $
                 locations and $ M N J^2 $ arithmetic operations where $M$ is
                 the number of centres, $N$ the total number of
                 customers and $J$ the number of closed chains. Results
                 from 130 randomly generated test networks are used to
                 compare the accuracy of the standard and Improved
                 Linearisers. The Improved Lineariser is consistently
                 more accurate (tolerance errors on all performance
                 measures less than 2 per cent) than the standard
                 Lineariser and its accuracy is insensitive to the size
                 of the network model. In addition, the Improved
                 Lineariser computes accurate solutions for networks
                 which cause the standard Lineariser to fail.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Approximate solutions; Error analysis; Mean value
                 analysis; Multiclass queueing networks; Product form
                 solutions",
}
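
%%% A minimal sketch, for orientation only, of the exact
%%% single-chain MVA recursion that lineariser methods
%%% approximate; the paper's multiclass, queue dependent setting
%%% is substantially harder.  Fixed rate centres are assumed and
%%% the demands are illustrative, not taken from the paper.

def exact_mva(N, demands):
    """Exact MVA for one closed chain of N customers;
    demands[k] is the service demand of centre k."""
    Q = [0.0] * len(demands)          # queue lengths at n = 0
    X = 0.0
    for n in range(1, N + 1):
        # Residence time: demand inflated by the queue seen at arrival.
        R = [D * (1.0 + q) for D, q in zip(demands, Q)]
        X = n / sum(R)                # throughput with n customers
        Q = [X * r for r in R]        # Little's law per centre
    return X, Q

print(exact_mva(20, [0.05, 0.08, 0.20]))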

@Article{Zahorjan:1984:ILD,
  author =       "John Zahorjan and Edward D. Lazowska",
  title =        "Incorporating load dependent servers in approximate
                 mean value analysis",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "52--62",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800264.809313",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Queueing network performance modelling technology has
                 made tremendous strides in recent years. Two of the
                 most important developments in facilitating the
                 modelling of large and complex systems are hierarchical
                 modelling, in which a single load dependent server is
                 used as a surrogate for a subsystem, and approximate
                 mean value analysis, in which reliable approximate
                 solutions of separable models are efficiently obtained.
                 Unfortunately, there has been no successful marriage of
                 these two developments; that is, existing algorithms
                 for approximate mean value analysis do not accommodate
                 load dependent servers reliably.\par

                 This paper presents a successful technique for
                 incorporating load dependent servers in approximate
                 mean value analysis. We consider multiple class models
                 in which the service rate of each load dependent server
                 is a function of the queue length at that server. In
                 other words, load dependent center $k$ delivers
                 ``service units'' at a total rate of $ f_k(n_k)$ when $
                 n_k$ customers are present. We present extensive
                 experimental validation which indicates that our
                 algorithm contributes an average error in response
                 times of less than 1\% compared to the (much more
                 expensive) exact solution.\par

                 In addition to the practical value of our algorithm,
                 several of the techniques that it employs are of
                 independent interest.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
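
%%% Extending the fixed rate sketch above, a hedged sketch of the
%%% exact single-class MVA recursion with load dependent servers,
%%% where centre k delivers service at total rate f_k(n_k) when
%%% n_k customers are present; this is the exact computation
%%% whose cost the paper's approximation avoids.  Demands and
%%% rate functions are hypothetical.

def exact_mva_load_dependent(N, demands, rate):
    """N customers; demands[k] = demand at centre k with one
    customer present; rate[k](j) = f_k(j), the relative service
    rate with j customers present (rate[k](1) == 1)."""
    K = len(demands)
    # p[k][j]: marginal probability of j customers at centre k,
    # carried over from the (n - 1)-customer solution.
    p = [[1.0] + [0.0] * N for _ in range(K)]
    X, R = 0.0, []
    for n in range(1, N + 1):
        # Mean residence times from the marginal distribution.
        R = [sum(j * demands[k] / rate[k](j) * p[k][j - 1]
                 for j in range(1, n + 1)) for k in range(K)]
        X = n / sum(R)                        # system throughput
        for k in range(K):
            new_p = [0.0] * (N + 1)
            for j in range(1, n + 1):
                new_p[j] = demands[k] / rate[k](j) * X * p[k][j - 1]
            new_p[0] = 1.0 - sum(new_p[1:n + 1])
            p[k] = new_p
    return X, R

# Two fixed rate centres plus one centre whose rate grows with
# queue length (hypothetical flow-equivalent server).
demands = [0.05, 0.08, 0.20]
rate = [lambda j: 1.0, lambda j: 1.0, lambda j: min(j, 3)]
print(exact_mva_load_dependent(10, demands, rate))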

@Article{Agrawal:1984:RTP,
  author =       "Subhash C. Agrawal and Jeffrey P. Buzen and Annie W.
                 Shum",
  title =        "{Response Time Preservation}: a general technique for
                 developing approximate algorithms for queueing
                 networks",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "63--77",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809314",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Response Time Preservation (RTP) is introduced as a
                 general technique for developing approximate analysis
                 procedures for queueing networks. The underlying idea
                 is to replace a subsystem by an equivalent server whose
                 response time in isolation equals that of the entire
                 subsystem in isolation. The RTP based approximations,
                 which belong to the class of decomposition
                 approximations, can be viewed as a dual of the Norton's
                 Theorem approach for solving queueing networks, since
                 they match response times rather than throughputs. The
                 generality of the RTP technique is illustrated by
                 developing solution procedures for several important
                 queueing systems which violate product form
                 assumptions. Examples include FCFS servers with general
                 service times, FCFS servers with different service
                 times for multiple classes, priority scheduling, and
                 distributed systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Mussi:1984:EPE,
  author =       "Ph. Mussi and Ph. Nain",
  title =        "Evaluation of parallel execution of program tree
                 structures",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "78--87",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809315",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We define and evaluate two policies (NA-policy,
                 A-policy) for parallel execution of program tree
                 structures. Via a probabilistic model we analytically
                 determine, for each policy, the Laplace--Stieltjes
                 transform for the tree processing time distribution.
                 The acceleration of the program execution time achieved
                 when adding processors to a single processor
                 environment is computed and plotted for each policy.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Sanguinetti:1984:POP,
  author =       "John Sanguinetti",
  title =        "Program optimization for a pipelined machine a case
                 study",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "88--95",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800264.809316",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The Amdahl 580 processor is a pipelined processor
                 whose performance can be affected by characteristics of
                 the instructions it executes. This paper describes
                 certain optimizations made to a set of system software
                 routines during their development. The optimization
                 effort was driven by the execution frequencies of
                 common paths through the programs in question, and by
                 the execution characteristics of those paths, as shown
                 by a processor simulator. Path optimization itself was
                 done with both general program optimization techniques
                 and with techniques specific to the particular
                 characteristics of the 580's pipeline. Overall, the
                 average execution time for these routines was reduced
                 by over 50\%.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Turner:1984:PDB,
  author =       "Rollins Turner and Jeffrey Schriesheim and Indrajit
                 Mitra",
  title =        "Performance of a {DECnet} based disk block server",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "96--104",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809317",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This report describes an experimental disk block
                 server implemented for the RSX-11M Operating System
                 using DECnet. The block server allows user programs on
                 one system to access files on a disk physically located
                 on a different system. The actual interface is at the
                 level of physical blocks and IO transfers. Results of
                 basic performance measurements are given and explained
                 in terms of major components. Performance predictions
                 are made for servers of this type supporting more
                 complex workloads.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Stavenow:1984:TDC,
  author =       "Bengt Stavenow",
  title =        "Throughput-delay characteristics and stability
                 considerations of the access channel in a mobile
                 telephone system",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "105--112",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809318",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper a performance study of the access
                 channel in a cellular mobile telephone system /1/ is
                 presented. The method used in the Cellular System for
                 multiplexing the population of mobile terminals over
                 the access channel is a hybrid between the methods
                 known as CSMA/CD and BTMA. In the paper we extend an
                 analysis of CSMA/CD to accommodate the function of the
                 particular random multiaccess protocol. Results are
                 shown which illustrate the equilibrium channel
                 performance and the approximate
                 stability-throughput-delay tradeoff. Finally an
                 estimate of the average message delay is given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Williams:1984:PQD,
  author =       "Elizabeth Williams",
  title =        "Processor queueing disciplines in distributed
                 systems",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "113--119",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809319",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A distributed program consists of processes, many of
                 which can execute concurrently on different processors
                 in a distributed system of processors. When several
                 processes from the same or different distributed
                 programs have been assigned to a processor in a
                 distributed system, the processor must select the next
                 process to run. The following two questions are
                 investigated: What is an appropriate method for
                 selecting the next process to run? Under what
                 conditions are substantial gains in performance
                 achieved by an appropriate method of selection?
                 Standard processor queueing disciplines, such as
                 first-come-first-serve and round-robin-fixed-quantum,
                 are studied. The results for four classes of queueing
                 disciplines tested on three problems are presented.
                 These problems were run on a testbed, consisting of a
                 compiler and simulator used to run distributed programs
                 on user-specified architectures.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Stephens:1984:CBH,
  author =       "Lindsey E. Stephens and Lawrence W. Dowdy",
  title =        "Convolutional bound hierarchies",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "120--133",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809320",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The time required to find the exact solution of a
                 product-form queueing network model of a computer
                 system can be high. Faster and cheaper methods of
                 solution, such as approximations, are natural
                 alternatives. However, the errors incurred when using
                 an approximation technique should be bounded. Several
                 recent techniques have been developed which provide
                 solution bounds. These bounding techniques have the
                 added benefit that the bounds can be made tighter if
                 extra computational effort is expended. Thus, a smooth
                 tradeoff of cost and accuracy is available. These
                 techniques are based upon mean value analysis. In this
                 paper a new bounding technique based upon the
                 convolution algorithm is presented. It provides a
                 continuous range of cost versus accuracy tradeoffs for
                 both upper and lower bounds. The bounds produced by the
                 technique converge to the exact solution as the
                 computational effort approaches that of convolution.
                 Also, the technique may be used to improve any existing
                 set of bounds.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
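
%%% A minimal sketch, assuming a single closed class and fixed
%%% rate stations, of Buzen's convolution algorithm --- the exact
%%% computation whose cost the bounding hierarchy above trades
%%% off against accuracy.  Demands are illustrative.

def convolution_throughput(N, demands):
    """Return exact throughputs X(1..N) via the normalizing
    constants G(0..N); demands[k] is the demand of station k."""
    g = [1.0] + [0.0] * N             # G for the empty network
    for D in demands:                 # fold in one station at a time
        for n in range(1, N + 1):     # ascending n: g[n-1] is G_k(n-1)
            g[n] += D * g[n - 1]      # G_k(n) = G_{k-1}(n) + D*G_k(n-1)
    return [g[n - 1] / g[n] for n in range(1, N + 1)]

print(convolution_throughput(5, [0.05, 0.08, 0.20]))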

@Article{Suri:1984:NBB,
  author =       "Rajan Suri and Gregory W. Diehl",
  title =        "A new `building block' for performance evaluation of
                 queueing networks with finite buffers",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "134--142",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809321",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We propose a new `building block', for analyzing
                 queueing networks. This is a model of a server with a
                 variable buffer-size. Such a model enables efficient
                 analysis of certain queueing networks with blocking due
                 to limited buffer spaces, since it uses only
                 product-form submodels. The technique is extensively
                 tested, and found to be reasonably accurate over a wide
                 range of parameters. Several examples are given,
                 illustrating practical situations for which our model
                 would prove to be a useful performance analysis tool,
                 especially since it is simple to understand and easy to
                 implement using standard software for closed queueing
                 networks.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "Approximate analysis; Blocking; Performance modelling;
                 Performance prediction; Product form networks; Queueing
                 networks",
}

@Article{Lavenberg:1984:SAE,
  author =       "Stephen S. Lavenberg",
  title =        "A simple analysis of exclusive and shared lock
                 contention in a database system",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "143--148",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809322",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider a probabilistic model of locking in a
                 database system in which an arriving transaction is
                 blocked and lost when its lock requests conflict with
                 the locks held by currently executing transactions.
                 Both exclusive and shared locks are considered. We
                 derive a simple asymptotic expression for the
                 probability of blocking which is exact to order $ 1 / N
                 $ where $N$ is the number of lockable items in the
                 database. This expression reduces to one recently
                 derived by Mitra and Weinberger for the special case
                 where all locks are exclusive.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Becker:1984:MMS,
  author =       "S. T. Becker and K. M. Rege and B. Sengupta",
  title =        "A modeling methodology for sizing a computer based
                 system in a netted environment",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "149--157",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809323",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper describes a hybrid model, combining both
                 analytical and simulation techniques, which was
                 developed to study the performance of a netted computer
                 based system. The computer based system that was
                 modeled is the Facility Assignment and Control System
                 (FACS). This system is presently being deployed within
                 several Bell Operating Companies to inventory and
                 assign central office and outside plant facilities. A
                 key feature of the model is its ability to characterize
                 the dynamic nature of FACS. An understanding of this
                 dynamic nature is necessary in establishing important
                 operational guidelines such as allowable CPU
                 utilization, levels of multiprogramming and priority of
                 transaction processing. In addition, the model allows
                 the user to investigate the sensitivity of the system
                 to a wide range of conditions. Typical study items
                 could include the effect of various load scenarios,
                 ability of the system to meet performance objectives,
                 and different hardware configurations. As part of this
                 paper, both the practical aspects of modeling a netted
                 computer based system and the theoretical development
                 of the hybrid model are considered.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Peachey:1984:EIS,
  author =       "Darwyn R. Peachey and Richard B. Bunt and Carey L.
                 Williamson and Tim B. Brecht",
  title =        "An experimental investigation of scheduling strategies
                 for {UNIX}",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "158--166",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809324",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The scheduler used in an operating system is an
                 important factor in the performance of the system under
                 heavy load. This paper describes the scheduling
                 philosophy employed in the UNIX operating system and
                 outlines the standard scheduling strategies. Modified
                 strategies which address deficiencies in the standard
                 strategies are described. The effectiveness of these
                 modified strategies is assessed by means of performance
                 experiments.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Menasce:1984:PEI,
  author =       "Daniel A. Menasc{\'e} and Leonardo Lellis P. Leite",
  title =        "Performance evaluation of isolated and interconnected
                 token bus local area networks",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "167--175",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809325",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The token bus based local area network, REDPUC,
                 designed and implemented at the Pontif{\'\i}cia
                 Universidade Cat{\'o}lica do Rio de Janeiro is briefly
                 described. Analytic models are presented, which allow
                 one to obtain an approximation for the average packet
                 delay, as well as exact upper and lower bounds for the
                 same performance measure. A performance evaluation of
                 interconnected local networks is also given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Agrawal:1984:UAS,
  author =       "Subhash C. Agrawal and Jeffrey P. Buzen and Ashok K.
                 Thareja",
  title =        "A Unified Approach to Scan Time Analysis of Token
                 Rings and Polling Networks",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "176--185",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809326",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Token rings and multipoint polled lines are two widely
                 used network interconnection techniques. The general
                 concept of cyclic allocation processes is defined and
                 used to characterize token passing and polling in these
                 networks. Scan time, the time to poll all nodes at
                 least once, is an important quantity in the response
                 time analysis of such networks. We derive expressions
                 for the mean and variance of scan times using a direct,
                 operational approach. Resulting expressions are general
                 and are applicable to both exhaustive and
                 non-exhaustive service. The effect of higher level
                 protocols is easily incorporated in the analysis via
                 calculations of constituent quantities. The expression
                 for mean scan time is exact and depends only on the
                 means of message transmission times and arrival rates.
                 The approximate analysis of variance takes into account
                 the correlation between message transmissions at
                 different nodes. Expected level of accuracy is
                 indicated by an example.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
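
%%% For orientation: the classical mean cycle-time identity for
%%% cyclic service, consistent with the abstract's claim that the
%%% mean scan time depends only on mean transmission times and
%%% arrival rates.  With total walk (polling overhead) time $R$
%%% per scan, arrival rate $\lambda_i$ and mean transmission time
%%% $\bar b_i$ at node $i$, the mean scan time under stability is
%%%
   $$ \bar C = \frac{R}{1 - \rho}, \qquad
      \rho = \sum_{i=1}^{M} \lambda_i \bar b_i < 1 . $$
%%%
%%% The harder contribution of the paper is the (approximate)
%%% variance of the scan time and the higher level protocols.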

@Article{Brandwajn:1984:EAM,
  author =       "Alexandre Brandwajn and William M. McCormack",
  title =        "Efficient approximation for models of multiprogramming
                 with shared domains",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "186--194",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809327",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Queueing network models of multiprogramming systems
                 with memory constraints and multiple classes of jobs
                 are important in representing large commercial computer
                 systems. Typically, an exact analytical solution of
                 such models is unavailable, and, given the size of
                 their state space, the solution of models of this type
                 is approached through simulation and/or approximation
                 techniques. Recently, a computationally efficient
                 iterative technique has been proposed by Brandwajn,
                 Lazowska and Zahorjan for models of systems in which
                 each job is subject to a separate memory constraint,
                 i.e., has its own memory domain. In some important
                 applications, it is not unusual, however, to have
                 several jobs of different classes share a single memory
                 ``domain'' (e.g., IBM's Information Management System).
                 We present a simple approximate solution to the shared
                 domain problem. The approach is inspired by the
                 recently proposed technique which is complemented by a
                 few approximations to preserve the conceptual
                 simplicity and computational efficiency of this
                 technique. The results are generally in fair agreement
                 with simulation.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bondi:1984:RTP,
  author =       "Andr{\'e} B. Bondi and Jeffrey P. Buzen",
  title =        "The response times of priority classes under
                 preemptive resume in {M/G/m} queues",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "195--201",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/800264.809328",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Approximations are given for the mean response times
                 of each priority level in a multiple-class multiserver
                 M/G/m queue operating under preemptive resume
                 scheduling. The results have been tested against
                 simulations of systems with two and three priority
                 classes and different numbers of servers.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
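
%%% A baseline sketch, not the authors' priority-class results:
%%% the standard two-moment approximation for the mean wait in an
%%% M/G/m queue, W(M/G/m) ~ ((1 + c_s^2)/2) W(M/M/m), with the
%%% M/M/m wait obtained from the Erlang-C formula.  Parameter
%%% values are illustrative.

from math import factorial

def erlang_c(m, a):
    """Probability an arrival must queue in M/M/m, offered load a."""
    rho = a / m
    top = a ** m / (factorial(m) * (1.0 - rho))
    return top / (sum(a ** k / factorial(k) for k in range(m)) + top)

def mgm_mean_wait(lam, mean_s, scv_s, m):
    """Two-moment approximation to the mean wait in M/G/m;
    scv_s is the squared coefficient of variation of service."""
    a = lam * mean_s                  # offered load, must be < m
    w_mmm = erlang_c(m, a) * mean_s / (m - a)
    return (1.0 + scv_s) / 2.0 * w_mmm

print(mgm_mean_wait(lam=1.8, mean_s=1.0, scv_s=2.0, m=2))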

@Article{Thomasian:1984:AQN,
  author =       "Alexander Thomasian and Paul Bay",
  title =        "Analysis of {Queueing Network Models} with population
                 size constraints and delayed blocked customers",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "3",
  pages =        "202--216",
  month =        aug,
  year =         "1984",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1031382.809329",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:00:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Queueing Network Models --- QNM's with population size
                 constraints and delayed blocked customers occur due to
                 MultiProgramming Level --- MPL constraints in computer
                 systems and window flow-control mechanisms in Computer
                 Communication Networks --- CCN's. The computational
                 cost of existing algorithms is unacceptable for large
                 numbers of chains and high population sizes. A fast
                 approximate solution technique based on load
                 concealment is presented to solve such QNM's. The
                 solution procedure is non-iterative in the case of
                 fixed rate Poisson arrivals, while iteration is
                 required in the case of quasi-random arrivals. Each
                 iteration requires the solution of a single chain
                 network of queues comprised of stations visited by each
                 chain. We then present an algorithm to detect saturated
                 chains and determine their maximum throughput. A fast
                 solution algorithm due to Reiser for closed chains is
                 also extended to the case of quasi-random arrivals. The
                 accuracy of the proposed solution techniques is
                 compared to previous techniques by applying it to a
                 test case, reported in the literature, and a set of
                 randomly generated examples.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gaffney:1984:IEP,
  author =       "John E. Gaffney",
  title =        "Instruction entropy, a possible measure of
                 program\slash architecture compatibility",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "4",
  pages =        "13--18",
  year =         "1984/1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041865",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Computer performance analysis, whether it be for
                 design, selection or improvement, has a large body of
                 literature to draw upon. It is surprising, however,
                 that few texts exist on the subject. The purpose of
                 this paper is to provide a feature analysis of the four
                 major texts suitable for professional and academic
                 purposes.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "computer performance evaluation; computer system
                 selection",
}

@Article{Sauer:1984:NSS,
  author =       "Charles H. Sauer",
  title =        "Numerical solution of some multiple chain queueing
                 networks",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "4",
  pages =        "19--28",
  month =        dec,
  year =         "1984/1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041866",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A workshop on the theory and application of analytical
                 models to ADP system performance prediction was held on
                 March 12-13, 1979, at the University of Maryland. The
                 final agenda of the workshop is included as an
                 appendix. Six sessions were conducted: (1) theoretical
                 advances, (2) operational analysis, (3) effectiveness
                 of analytical modeling techniques, (4) validation, (5)
                 case studies and applications, and (6) modeling tools.
                 A summary of each session is presented below. A list of
                 references is provided for more detailed information.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Thomasian:1984:SCS,
  author =       "Alexander Thomasian and Kameshwar Gargeya",
  title =        "Speeding up computer system simulations using
                 hierarchical modeling",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "4",
  pages =        "34--39",
  month =        dec,
  year =         "1984/1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041867",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The concept of a `working-set' of a program running in
                 a virtual memory environment is now so familiar that
                 many of us fail to realize just how little we really
                 know about what it is, what it means, and what can be
                 done to make such knowledge actually useful. This
                 follows, perhaps, from the abstract and apparently
                 intangible facade that tends to obscure the meaning of
                 working set. What we cannot measure often ranks high in
                 curiosity value, but ranks low in pragmatic utility.
                 Where we have measures, as in the page-seconds of
                 SMF/MVS, the situation becomes even more curious: here
                 a single number purports to tell us something about the
                 working set of a program, and maybe something about the
                 working sets of other concurrent programs, but not very
                 much about either. This paper describes a case in which
                 the concept of the elusive working set has been
                 encountered in practice, has been intensively analyzed,
                 and finally, has been confronted in its own realm. It
                 has been trapped, wrapped, and, at last, forced to
                 reveal itself for what it really is. It is not a
                 number! Yet it can be measured. And what it is,
                 together with its measures, turns out to be something
                 not only high in curiosity value, but also something
                 very useful as a means to predict the page faulting
                 behavior of a program running in a relatively complex
                 multiprogrammed environment. The information presented
                 here relates to experience gained during the conversion
                 of a discrete event simulation model to a hybrid model
                 which employs analytical techniques to forecast the
                 duration of `steady-state' intervals between mix-change
                 events in the simulation of a network-scheduled job
                 stream processing on a 370/168-3AP under MVS. The
                 specific `encounter' with the concept of working sets
                 came about when an analytical treatment of program
                 paging was incorporated into the model. As a result of
                 considerable luck, ingenuity, and brute-force
                 empiricism, the model won. Several examples of
                 empirically derived characteristic working set
                 functions, together with typical model results, are
                 supported with a discussion of relevant modeling
                 techniques and areas of application.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Elshoff:1984:PMP,
  author =       "James L. Elshoff",
  title =        "The {PEEK} measurement program",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "4",
  pages =        "40--53",
  month =        "Winter",
  year =         "1984/1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041868",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper discussed the problems encountered and
                 techniques used in conducting the performance
                 evaluation of a multi-processor on-line manpower data
                 collection system. The two main problems were: (1) a
                 total lack of available software tools, and (2) many
                 commonly used hardware monitor measures (e.g., CPU
                 busy, disk seek in progress) were either meaningless or
                 not available. The main technique used to circumvent
                 these problems was detailed analysis of one-word
                 resolution memory maps. Some additional data collection
                 techniques were (1) time-stamped channel measurements
                 used to derive some system component utilization
                 characteristics and (2) manual stopwatch timings used
                 to identify the system's terminal response times.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hac:1984:STM,
  author =       "Anna H{\'a}c",
  title =        "A survey of techniques for the modeling of
                 serialization delays in computer systems",
  journal =      j-SIGMETRICS,
  volume =       "12",
  number =       "4",
  pages =        "54--56",
  month =        dec,
  year =         "1984/1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041864.1041869",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:43 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The current status of an implementation of a
                 methodology relating load, capacity and service for IBM
                 MVS computer systems is presented. This methodology
                 encompasses systems whose workloads include batch, time
                 sharing and transaction processing. The implementation
                 includes workload classification, mix representation
                 and analysis, automatic benchmarking, and exhaust point
                 forecasting.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Mosleh:1985:BPR,
  author =       "Ali Mosleh and E. Richard Hilton and Peter S. Browne",
  title =        "{Bayesian} probabilistic risk analysis",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "1",
  pages =        "5--12",
  month =        jun,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041838.1041839",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:44 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "As modern business and financial institutions have
                 come to rely more and more on large scale computers for
                 management support, the magnitude of the risks and
                 their potential consequences has increased
                 correspondingly. In addition, large systems involving
                 multiprocessing, resource sharing, and distributed
                 processing have given rise to a new generation of risks
                 due to the increased vulnerabilities of such large
                 scale systems and the potential for fraudulent or
                 malicious misuse of their resources. Somehow, these
                 risks must be managed since either deliberate or
                 accidental impairment of these large scale systems can
                 have serious consequences for the business. That is,
                 threats must be identified, and the likelihood of their
                 occurrences and the elements of the system vulnerable
                 to each of these threats must be established. Any
                 program for risk management must begin with a risk
                 analysis to compare the vulnerabilities in order to
                 pinpoint and rank the system's weaknesses and to
                 provide a guide for the cost-effective, systematic
                 reduction of the probability of the system's being
                 subverted or otherwise impaired.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gong:1985:CMB,
  author =       "Huisheng Gong and Monika Schmidt",
  title =        "A complexity measure based on selection and nesting",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "1",
  pages =        "14--19",
  month =        jun,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041838.1041840",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:44 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Many concepts concerning the quantification of program
                 complexity have been developed during the last few
                 years. One of the most accepted and easy-to-apply
                 complexity measures, McCabe's cyclomatic number, has
                 been discussed and improved in several studies. The
                 cyclomatic number only considers the decision structure
                 of a program. Therefore, this paper proposes a new
                 method for calculating program complexity based on the
                 concept of postdomination, which takes into account the degree
                 of nesting of a program. Combining this method and the
                 cyclomatic number, a new complexity measure will be
                 defined.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "cyclomatic number; degree of nesting; forward
                 dominance; program complexity",
}
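
%%% A minimal Python sketch of the baseline measure this abstract
%%% starts from: McCabe's cyclomatic number V(G) = E - N + 2P of a
%%% control-flow graph.  The paper's nesting-aware refinement via
%%% postdomination is not reproduced here.
%%%
%%%     def cyclomatic_number(num_edges, num_nodes, num_components=1):
%%%         # McCabe (1976): V(G) = E - N + 2P for a control-flow
%%%         # graph with E edges, N nodes, P connected components.
%%%         return num_edges - num_nodes + 2 * num_components
%%%
%%%     # A single if/else (4 nodes, 4 edges) has V(G) = 2:
%%%     assert cyclomatic_number(num_edges=4, num_nodes=4) == 2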

@Article{Knudson:1985:PMS,
  author =       "Michael E. Knudson",
  title =        "A performance measurement and system evaluation
                 project plan proposal",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "1",
  pages =        "20--31",
  month =        jun,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041838.1041841",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:44 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This document is an outline for a performance
                 measurement and evaluation effort. Performance
                 measurements consist of producing data showing
                 frequency and execution times for components of
                  computer systems. ``Components'' here comprises: (1)
                  hardware, (2) ucode, (3) macrocode, (4) applications
                  software, (5) systems (e.g., utilities in an
                  operating-system environment). Evaluation can be
                  broken down into several areas. Principal areas of
                  interest are
                 comparative performance evaluation and an analysis of a
                 system's structure/behavior. Comparative evaluation
                 consists of: relative performance measurements of
                 different machines; a summary of collected data; and an
                 analysis of a system's structure, including the
                 production of data describing the interrelationship of
                 system components. This data may be narrative, but the
                 preferred technique is a graphical presentation showing
                 component relationships.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ejiogu:1985:SMS,
  author =       "Lem O. Ejiogu",
  title =        "A simple measure of software complexity",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "1",
  pages =        "33--47",
  month =        jun,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041838.1041842",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:44 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Every science mast have its own method of
                 investigation built on a sound foundation that is
                 empirical, justifiable and verifiable. Software
                 metrics, too, can benefit from this principle. A
                 complex aggregate of tools, ideas, methodologies,
                  programming languages, and varieties of applications goes
                 into the development, design, manufacture and
                 maintenance of software. The combinations impose
                 another level of complexity on software.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Eager:1985:CRI,
  author =       "Derek L. Eager and Edward D. Lazowska and John
                 Zahorjan",
  title =        "A comparison of receiver-initiated and
                 sender-initiated adaptive load sharing (extended
                 abstract)",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "1--3",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317802",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "One goal of locally distributed systems is to
                 facilitate resource sharing. Most current locally
                 distributed systems, however, share primarily data,
                 data storage devices, and output devices; there is
                 little sharing of computational resources. Load sharing
                 is the process of sharing computational resources by
                 transparently distributing the system workload. System
                 performance can be improved by transferring work from
                 nodes that are heavily loaded to nodes that are lightly
                 loaded. Load sharing policies may be either static or
                 adaptive. Static policies use only information about
                 the average behavior of the system; transfer decisions
                 are independent of the actual current system state.
                 Static policies may be either deterministic (e.g.,
                 ``transfer all compilations originating at node $A$ to
                 server $B$'') or probabilistic (e.g., ``transfer half
                 of the compilations originating at node $A$ to server
                 $B$, and process the other half locally''). Numerous
                 static load sharing policies have been proposed. Early
                 studies considered deterministic rules [Stone 1977,
                 1978; Bokhari 1979]. More recently, Tantawi and Towsley
                 [1985] have developed a technique to find optimal
                 probabilistic rules. The principal advantage of static
                 policies is their simplicity: there is no need to
                 maintain and process system state information. Adaptive
                 policies, by contrast, are more complex, since they
                 employ information on the current system state in
                 making transfer decisions. This information makes
                 possible significantly greater performance benefits
                 than can be achieved under static policies. This
                 potential was clearly indicated by Livny and Melman
                 [1982], who showed that in a network of homogeneous,
                 autonomous nodes there is a high probability that at
                 least one node is idle while tasks are queued at some
                 other node, over a wide range of network sizes and
                 average node utilizations. In previous work [Eager,
                 Lazowska \& Zahorjan 1984] we considered the
                 appropriate level of complexity for adaptive load
                 sharing policies. (For example, how much system state
                 information should be collected, and how should it be
                 used in making transfer decisions?) Rather than
                 advocating specific policies, we considered fairly
                 abstract strategies exhibiting various levels of
                 complexity. We demonstrated that the potential of
                 adaptive load sharing can in fact be realized by quite
                  simple strategies that use only small amounts of
                 system state information. This result is important
                 because of a number of practical concerns regarding
                 complex policies: the effect of the overhead required
                 to administer a complex policy, the effect of the
                 inevitable inaccuracies in detailed information about
                 system state and workload characteristics, and the
                 potential for instability. (We consciously use the
                 phrase ``load sharing'' rather than the more common
                 ``load balancing'' to highlight the fact that load
                 balancing, with its implication of attempting to
                 equalize queue lengths system-wide, is not an
                 appropriate objective.) Adaptive load sharing policies
                 can employ either centralized or distributed control.
                 Distributed control strategies can be of two basic
                 types (although intermediate strategies also are
                 conceivable): sender-initiated (in which congested
                 nodes search for lightly loaded nodes to which work may
                 be transferred), and receiver-initiated (in which
                 lightly loaded nodes search for congested nodes from
                 which work may be transferred). Our earlier paper
                 considered distributed, sender-initiated policies --- a
                 sufficiently rich class to allow us to answer the
                 fundamental questions of policy complexity that we were
                 addressing. In the course of understanding the reasons
                 for the degradation of these policies at high system
                 loads, we were led to consider receiver-initiated
                 policies as a possible alternative. The comparison of
                 receiver-initiated and sender-initiated adaptive load
                 sharing is the purpose of the present paper. There have
                 been several experimental studies, using prototypes and
                 simulation models, of specific (typically fairly
                 complex) adaptive load sharing policies [Bryant \&
                 Finkel 1981; Livny \& Melman 1982; Kreuger \& Finkel
                 1984; Barak \& Shiloh 1984]. Both sender-initiated
                 policies and receiver-initiated policies have been
                 considered. However, there has not previously been a
                 rigorous comparison of these two strategies. Such a
                 comparison is made difficult by the problem of choosing
                 appropriate representative policies of each type, and
                 by the potentially quite different costs incurred in
                 effecting transfers. (Receiver-initiated policies
                 typically will require the transfer of executing tasks,
                 which incurs substantial costs in most systems [Powell
                 \& Miller 1983]. Sender-initiated policies naturally
                 avoid such costly transfers, since tasks can be
                 transferred upon arrival, prior to beginning
                 execution.) Our present paper is similar to our
                 previous work in that our purpose, rather than to
                 advocate specific policies, is to address a fundamental
                 question concerning policies in general: How should
                 system state information be collected and load sharing
                 actions initiated --- by potential receivers of work,
                 or by potential senders of work? In studying this
                 question we consider a set of abstract policies that
                 represent only the essential aspects of
                 receiver-initiated and sender-initiated load sharing
                 strategies. These policies are investigated using
                 simple analytic models. Our objective is not to
                 determine the absolute performance of particular load
                 sharing policies, but rather to gain intuition
                 regarding the relative merits of the different
                 approaches under consideration. We represent locally
                 distributed systems as collections of identical nodes,
                 each consisting of a single processor. The nodes are
                 connected by a local area network (e.g., an Ethernet).
                 All nodes are subjected to the same average arrival
                 rate of tasks, which are of a single type. In contrast
                 to most previous papers on load sharing, we represent
                 the cost of task transfer as a processor cost rather
                 than as a communication network cost. It is clear from
                 measurement and analysis [Lazowska et al. 1984] that
                 the processor costs of packaging data for transmission
                 and unpackaging it upon reception far outweigh the
                 communication network costs of transmitting the data.
                 We study three abstract load sharing policies,
                 comparing their performance to each other and to that
                 of a system in which there is no load sharing. The
                  Sender policy is used as a representative of
                 sender-initiated load sharing strategies. The Receiver
                 and Reservation policies are used as representatives of
                 receiver-initiated load sharing strategies; unlike the
                 Receiver policy, the Reservation policy will transfer
                  only newly arriving tasks. In a bit more detail. Sender:
                 In our earlier work concerning the appropriate level of
                 complexity for adaptive load sharing schemes, we
                 identified two sub-policies of sender-initiated
                 strategies. The transfer policy determines whether a
                 task should be processed locally or remotely. The
                 location policy determines to which node a task
                 selected for transfer should be sent. In that previous
                 study, we considered threshold transfer policies, in
                 which each node uses only local state information. An
                 attempt is made to transfer a task originating at a
                 node if and only if the number of tasks already in
                 service or waiting for service (the node queue length)
                 is greater than or equal to some threshold T. We
                 considered various location policies spanning a range
                 of complexity. We found that the use of a complex
                 location policy yields only slight improvement over the
                 use of a simple location policy that, like the transfer
                 policy, uses threshold information. In this threshold
                 location policy, a node is selected at random and
                 probed to determine whether the transfer of a task to
                 that node would place the node above the threshold T.
                 If not, then the task is transferred. If so, then
                 another node is selected at random and probed in the
                 same manner. This continues until either a suitable
                 destination node is found, or the number of probes
                 reaches a static probe limit, Lp. In the latter case,
                 the originating node must process the task. (The use of
                 probing with a fixed limit, rather than broadcast,
                 ensures that the cost of executing the load sharing
                 policy will not be prohibitive even in large networks.
                 The performance of this policy was found to be
                 surprisingly insensitive to the choice of probe limit:
                 the performance with a small probe limit, e.g., 3 or 5,
                 is nearly as good as the performance with a large probe
                 limit, e.g., 20.) The sender-initiated policy with a
                 threshold transfer policy and a threshold location
                 policy was found to yield performance not far from
                 optimal, particularly at light to moderate system
                 loads. For this reason, and because of its simplicity,
                 we choose this policy to serve as the representative of
                 sender-initiated strategies for the comparison that is
                 the subject of the present paper, and term it here the
                  Sender policy. Receiver: To facilitate comparison
                 between sender-initiated strategies and
                 receiver-initiated strategies, a representative policy
                 of the latter class should be as similar as possible to
                 the Sender policy. In particular, it should utilize
                 threshold-type state information, and have a bound Lp
                 on the number of remote nodes whose state can be
                 examined when making a task transfer decision. In the
                 Receiver policy, a node attempts to replace a task that
                 has completed processing if there are less than $T$
                 tasks remaining at the node. A remote node is selected
                 at random and probed to determine whether the transfer
                 of a task from that node would place its queue length
                 below the threshold value T. If not, and if the node is
                 not already in the process of transferring a task, a
                 task is transferred to the node initiating the probe.
                 Otherwise, another node is selected at random and
                 probed in the same manner. This continues until either
                 a node is found from which a task can be obtained, or
                 the number of probes reaches a static probe limit, Lp.
                 In the latter case, the node must wait until another
                 task departs before possibly attempting again to
                 initiate a transfer. (This is completely analogous to
                 the operation of the Sender policy, in which a node
                 that fails to find a suitable destination to which to
                 transfer a task must wait until another task arrives
                 before attempting again to initiate a transfer.) The
                 Receiver policy with T=1 has been studied using a
                 simulation model by Livny and Melman [1982], who term
                  it the ``poll when idle algorithm''. Reservation: The
                 Reservation policy, like the Sender policy but in
                 contrast to the Receiver policy, will only transfer
                 newly arriving tasks. This may be advantageous in
                 multiprogramming systems in which nodes attempt to give
                 each of the tasks present some share of the total
                 available processing power. If the Receiver policy is
                 used in such a system, almost all task transfers will
                 involve executing tasks, and may be substantially more
                 costly than transfers of non-executing tasks. In the
                 Reservation policy, as in the Receiver policy, a node
                 attempts to replace a task that has completed
                 processing if there are less than $T$ tasks remaining
                 at the node. A remote node is selected at random and
                 probed to determine whether the transfer of the next
                 task to originate at that node would place its queue
                 length below the threshold value T. If not, and if no
                 other ``reservation'' is pending for this node, then
                 this next arrival is ``reserved'' by the probing node;
                 it is transferred upon arrival if no other tasks have
                 arrived at the probing node by that time. If the
                 reservation attempt is not successful, another node is
                  selected at random and probed in the same manner. This
                 continues until either a node is found at which the
                 next arrival can be reserved, or the number of probes
                 reaches a static probe limit, Lp. In the latter case,
                 the node must wait until another task departs before
                 possibly attempting again to reserve a task. Our
                 evaluation of this policy is optimistic. (Even this
                 optimistic evaluation predicts unsatisfactory
                 performance.) At the time a reservation is attempted,
                 we assume that the probed node can ``see into the
                 future'' to the arrival time of the (potentially)
                 reserved task. The reservation is made only if the
                 probed node will be above threshold at that time. Also,
                 even when a reservation request is successful, the
                 probed node considers this next arrival as ineligible
                 for other reservation requests only if it will actually
                 be transferred to the node holding the reservation.
                 Finally, we assume that the probability that a task
                 will be processed locally rather than transferred,
                 given that it arrives when the node queue length is at
                 or over threshold, is independent of the prior history
                 of the task arrivals and departures. In fact, this
                 probability is higher for tasks with shorter
                 interarrival times. Many of the results of our study
                 are illustrated in the accompanying figure. While the
                 figure illustrates specific choices of parameter
                 values, the results are quite robust with respect to
                 these choices; a substantial part of the full paper is
                 devoted to demonstrating this robustness. The results
                 include: Both receiver-initiated and sender-initiated
                 policies offer substantial performance advantages over
                 the situation in which no load sharing is attempted
                 (shown as M/M/1 in the figure). Sender-initiated
                 policies are preferable to receiver-initiated policies
                 at light to moderate system loads. Receiver-initiated
                 policies are preferable at high system loads, but only
                 if the costs of task transfer under the two strategies
                 are comparable. If the cost of task transfers under
                 receiver-initiated policies is significantly greater
                 than under sender-initiated policies (for example,
                 because executing tasks must be transferred), then
                 sender-initiated policies provide uniformly better
                 performance. Modifying receiver-initiated policies to
                 transfer only newly-arrived tasks (so as to avoid the
                 cost of transferring executing tasks) yields
                  unsatisfactory performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
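
%%% A Python sketch of the Sender policy's transfer and location
%%% rules as the abstract describes them (threshold T, random
%%% probing, static probe limit Lp).  The queue representation and
%%% sampling without replacement are illustrative assumptions, not
%%% the authors' model.
%%%
%%%     import random
%%%
%%%     def sender_policy_arrival(queues, origin, T=2, Lp=3):
%%%         # Transfer policy: attempt a transfer only if the local
%%%         # queue length is at or above the threshold T.
%%%         if queues[origin] < T:
%%%             queues[origin] += 1
%%%             return origin
%%%         # Location policy: probe up to Lp random remote nodes;
%%%         # transfer to the first whose queue would stay at or
%%%         # below T after the transfer.
%%%         others = [n for n in range(len(queues)) if n != origin]
%%%         for node in random.sample(others, min(Lp, len(others))):
%%%             if queues[node] + 1 <= T:
%%%                 queues[node] += 1
%%%                 return node
%%%         queues[origin] += 1   # probe limit hit: process locally
%%%         return origin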

@Article{Gelernter:1985:ACP,
  author =       "David Gelernter and Sunil Podar and Hussein G. Badr",
  title =        "An adaptive communications protocol for network
                 computers (extended abstract)",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "4--5",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317803",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A network computer is a collection of computers
                 designed to function as one machine. On a network
                 computer, as opposed to a multiprocessor, constituent
                 subcomputers are memory-disjoint and communicate only
                 by some form of message exchange. Ensemble
                 architectures like multiprocessors and network
                 computers are of growing interest because of their
                 capacity to support parallel programs, where a parallel
                 program is one that is made up of many
                 simultaneously-active, communicating processes.
                 Parallel programs should, on an appropriate
                 architecture, run faster than sequential programs, and,
                 indeed, good speed-ups have been reported in parallel
                 programming experiments in several domains, amongst
                 which are AI, numerical problems, and system
                 simulation. Our interest lies in network computers,
                 particularly ones that range in size from several
                 hundred nodes to several thousand. Network computers
                 may be organized in either of two basic ways: their
                 nodes may communicate over a shared bus (or series of
                 buses), as in S/Net; or over point-to-point links, as
                 in Cosmic Cube and the Transputer Network. The work to
                 be presented deals with the point-to-point class, the
                 elements of which we shall refer to as ``linked
                 networks''. Linked networks face a fundamental
                 communication problem. Unless they are completely
                 connected (which is rarely possible), two communicating
                 nodes will not necessarily be connected by a single
                 link. Messages between nodes must therefore, in
                 general, travel over several links and be processed by
                 several intermediate nodes. Communication delays
                 increase with the length of the traveled path. Network
                 computer designers therefore provide networks the
                 diameters of which are small relative to their size,
                 and network operating systems will attempt to place
                 communicating processes as close to each other as
                 possible. We present a communication protocol for
                 linked networks that was designed specifically for
                 network computers. Staged Circuit Switching is a
                 communication protocol that combines aspects of
                 store-and-forwarding with aspects of circuit switching,
                 where circuit switching refers to the class of
                 protocols in which a communicating source and
                 destination first construct a dedicated path or circuit
                 between them, then communicate directly over this path.
                 The path may be a physical connection, as in
                  space-switched circuit-switching, or a series of
                 dedicated slots in time-division multiplexing switches,
                 as in time-switching protocols. The
                  staged-circuit-switching design is strongly related to
                  space-switched circuit-switching and encompasses both
                 the protocol itself and a communication architecture to
                 support it. In staged circuit switching, each message
                 constructs for itself the longest physical circuit that
                 it can without waiting for links. When a message is to
                 be sent, a header that records the message's source and
                 destination is sent propagating through the network
                 towards the destination node; the header seizes each
                 free link along its path and incorporates it into a
                 growing circuit. When it meets a busy link, or arrives
                 at its destination, circuit building stops, the
                 message's data portion is transmitted and acknowledged
                 over the existing circuit, and the circuit is released.
                 A message that has not arrived at its destination then
                 gathers itself together and plunges onward in the same
                 fashion. In an empty network then, staged circuit
                 switching is the same as circuit switching: each
                 message is transmitted over a direct circuit from
                 source to destination. In a heavily loaded network, it
                 is the same as store-and-forwarding: each next-link is
                 busy, each circuit is therefore only one link long, and
                 the message proceeds hop by hop. The protocol combines
                 the speed benefits of circuit switching at light
                 traffic loads, with the high bandwidth advantages of
                 store-and-forwarding at heavy loads. We have carried
                 out extensive simulation studies to evaluate the
                 dynamics of staged circuit switching from the point of
                 view of message delays, throughput, circuit lengths,
                 efficiency, implementation, and so on. The studies were
                 implemented in the context of a toroidal topology of
                 diameter 32, yielding a 1024-node network. Uniform
                 source-to-destination distributions were used. Both the
                 topology and the source-to-destination distributions
                 are analyzed. An analysis of network saturation based
                 on mean values is also given. Staged circuit switching
                  unambiguously emerges as a strong protocol with
                  performance characteristics superior to those of
                  either classical store-and-forwarding or circuit
                  switching, particularly with regard to adaptability
                  to varying
                 network loads and in providing a consistently high
                 effective network bandwidth. On the basis of our
                 results the protocol is proposed as a suitable
                 candidate for linked networks. Its attractiveness is
                 further enhanced by its potential ability to
                 continually reconfigure the network dynamically at
                 runtime to optimize for observed traffic patterns.
                 Heavily-used circuits may be left in place over longer
                 periods than a single message transmission. In this
                 way, the system constantly rearranges the network
                 topology in order to bring heavily-communicating
                 distant nodes closer together, thereby acting as a
                 ``communication cache''. A ``cache hit'' would
                 correspond to finding the desired destination node one
                 hop away from a given source. Effective exploitation of
                 this capability is the subject of ongoing research.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
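
%%% A Python sketch of the circuit-building step of staged circuit
%%% switching as described above: seize the longest prefix of free
%%% links, transmit over it, release it, and continue.  Modelling
%%% the route as a list of busy flags is an illustrative assumption.
%%%
%%%     def staged_circuit_hops(link_busy):
%%%         # link_busy[i]: True if link i on the route is busy.
%%%         # Returns the length of each circuit the message builds.
%%%         hops, i = [], 0
%%%         while i < len(link_busy):
%%%             start = i
%%%             while i < len(link_busy) and not link_busy[i]:
%%%                 i += 1          # seize each free link
%%%             if i == start:      # next link busy: one hop of
%%%                 i += 1          # store-and-forwarding
%%%                 hops.append(1)
%%%             else:
%%%                 hops.append(i - start)
%%%         return hops
%%%
%%%     # Empty network: one circuit spans the whole route;
%%%     # saturated network: hop-by-hop store-and-forwarding.
%%%     assert staged_circuit_hops([False] * 5) == [5]
%%%     assert staged_circuit_hops([True] * 5) == [1, 1, 1, 1, 1]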

@Article{Gelenbe:1985:ADC,
  author =       "Erol Gelenbe and David Finkel and Satish K. Tripathi",
  title =        "On the availability of a distributed computer system
                 with failing components",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "6--13",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317804",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We present a model for distributed systems with
                 failing components. Each node may fail and during its
                 recovery the load is distributed to other nodes that
                 are operational. The model assumes periodic
                 checkpointing for error recovery and testing of the
                 status of other nodes for the distribution of load. We
                 consider the availability of a node, which is the
                 proportion of time a node is available for processing,
                 as the performance measure. A methodology for
                 optimizing the availability of a node with respect to
                 the checkpointing and testing intervals is given. A
                 decomposition approach that uses the steady-state flow
                 balance condition to estimate the load at a node is
                 proposed. Numerical examples are presented to
                 demonstrate the usefulness of the technique. For the
                 case in which all nodes are identical, closed form
                 solutions are obtained.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
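
%%% The paper optimizes availability over checkpointing and testing
%%% intervals; its decomposition is not reproduced here.  A Python
%%% sketch of the classical first-order checkpointing trade-off only
%%% (Young's approximation, not the paper's formula):
%%%
%%%     import math
%%%
%%%     def useful_fraction(T, C, lam):
%%%         # Work fraction after checkpoint overhead (C/T) and the
%%%         # expected rework (about lam*T/2 for Poisson faults at
%%%         # rate lam, checkpoint interval T, checkpoint cost C).
%%%         return 1.0 - C / T - lam * T / 2.0
%%%
%%%     def optimal_interval(C, lam):
%%%         # d/dT (C/T + lam*T/2) = 0  ==>  T* = sqrt(2*C/lam)
%%%         return math.sqrt(2.0 * C / lam)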

@Article{Conway:1985:RNE,
  author =       "A. E. Conway and N. D. Georganas",
  title =        "{RECAL} --- a new efficient algorithm for the exact
                 analysis of multiple-chain closed queueing networks
                 (abstract)",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "14--14",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317805",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "RECAL, a Recursion by Chain Algorithm for computing
                 the mean performance measures of product-form
                 multiple-chain closed queueing networks, is presented.
                 It is based on a new recursive expression which relates
                 the normalization constant of a network with $r$ closed
                 routing chains to those of a set of networks having $
                  (r - 1)$ chains. It relies on the artifice of breaking
                 down each chain into constituent sub-chains that each
                 have a population of one. The time and space
                 requirements of the algorithm are shown to be
                 polynomial in the number of chains. When the network
                 contains many routing chains the proposed algorithm is
                 substantially more efficient than the convolution or
                 mean value analysis algorithms. The algorithm therefore
                 extends the range of queueing networks which can be
                 analyzed efficiently by exact means. A numerical
                 example is given.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
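
%%% RECAL's chain-by-chain recursion is not reproduced here, but the
%%% convolution algorithm it is compared against is short enough to
%%% sketch in Python for the single-chain, fixed-rate case (Buzen's
%%% algorithm); the station demands below are illustrative values.
%%%
%%%     def normalization_constants(demands, N):
%%%         # demands[k] = visit ratio * mean service time at
%%%         # station k; returns G(0..N) for a closed product-form
%%%         # network with N customers.
%%%         g = [1.0] + [0.0] * N
%%%         for d in demands:
%%%             for n in range(1, N + 1):
%%%                 g[n] += d * g[n - 1]
%%%         return g
%%%
%%%     g = normalization_constants([1.0, 0.5, 0.25], N=4)
%%%     throughput = g[3] / g[4]   # X(N) = G(N-1)/G(N)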

@Article{Balbo:1985:MPS,
  author =       "G. Balbo and S. C. Bruell and S. Ghanta",
  title =        "Modeling priority schemes",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "15--26",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317806",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We develop Generalized Stochastic Petri Net models for
                 several priority queueing disciplines. The building
                 blocks of these models are explained and many variants
                 are easily derivable from them. We then combine these
                 building blocks with product-form queueing network
                 models. Numerical results are provided that illustrate
                 the effectiveness of the method.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "generalized stochastic Petri nets; head-of-the-line;
                 preemptive resume; priorities; product-form queueing
                 networks; reorientation; time-out",
}
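
%%% The paper models these disciplines with GSPNs; as a point of
%%% reference only, a Python sketch of the classical closed form for
%%% one of them, head-of-the-line (non-preemptive) priority in an
%%% M/G/1 queue (Cobham's formula, not taken from the paper):
%%%
%%%     def hol_waiting_times(lam, ES, ES2):
%%%         # lam[i], ES[i], ES2[i]: arrival rate, mean service
%%%         # time, and second moment of service for class i
%%%         # (class 0 has the highest priority).  Cobham:
%%%         #   W_i = W0 / ((1 - s_{i-1}) (1 - s_i)),
%%%         #   W0 = sum_j lam[j] ES2[j] / 2,  s_i = sum_{j<=i} rho_j
%%%         W0 = sum(l * m2 for l, m2 in zip(lam, ES2)) / 2.0
%%%         waits, s = [], 0.0
%%%         for l, m in zip(lam, ES):
%%%             prev, s = s, s + l * m
%%%             waits.append(W0 / ((1.0 - prev) * (1.0 - s)))
%%%         return waits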

@Article{Walstra:1985:NNQ,
  author =       "Robbe J. Walstra",
  title =        "Nonexponential networks of queues: a maximum entropy
                 analysis",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "27--37",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317807",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We will propose a new, iterative method for
                 approximately analyzing closed networks of queues with
                 nonexponential service time distributions and FCFS
                 scheduling. Our method is based on the Principle of
                 Maximum Entropy and produces results which, first, are
                 consistent with the fundamental Work Rate Theorem and,
                 second, are exact for separable networks of queues.
                 Considering accuracy and execution time
                 characteristics, our method offers a viable alternative
                 to Marie's homogeneous approximation method.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
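
%%% To make the Principle of Maximum Entropy concrete: given only a
%%% mean queue length, the maximum-entropy distribution on
%%% {0, 1, 2, ...} is geometric.  The Python sketch below shows just
%%% this textbook special case, not the paper's FCFS method with its
%%% additional constraints.
%%%
%%%     def max_entropy_pmf(mean_n, n_max=50):
%%%         # Maximize -sum p(n) log p(n) s.t. sum n p(n) = mean_n:
%%%         # p(n) = (1 - x) x**n  with  x = mean_n / (1 + mean_n).
%%%         x = mean_n / (1.0 + mean_n)
%%%         return [(1.0 - x) * x ** n for n in range(n_max + 1)]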

@Article{Calzarossa:1985:SSC,
  author =       "Maria Calzarossa and Domenico Ferrari",
  title =        "A sensitivity study of the clustering approach to
                 workload modeling (extended abstract)",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "38--39",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317808",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In a paper published in 1984 [Ferr84], the validity of
                 applying clustering techniques to the design of an
                 executable model for an interactive workload was
                 discussed. The following assumptions, intended not to
                 be necessarily realistic but to provide sufficient
                 conditions for the applicability of clustering
                 techniques, were made: The system whose workload is to
                 be modeled is an interactive system, and its
                 performance can be accurately evaluated by solving a
                 product-form closed queueing network model. The
                 behavior of each interactive user can be adequately
                 modeled by a probabilistic graph (called a user
                 behavior graph); in such a graph, each node represents
                 an interactive command type, and the duration of a
                 user's stay in the node probabilistically equals the
                 time the user spends typing in a command of that type,
                 waiting for the system's response, and thinking about
                 what command should be input next. The interactive
                 workload to be modeled is stationary, and the workload
                 model to be constructed is intended to reproduce its
                 global characteristics (not those of some brief excerpt
                 from it exhibiting peculiar dynamics), hence to be
                 stationary as well. It was shown in [Ferr84] that,
                 under these assumptions, clustering command types
                 having the same probabilistic resource demands does not
                 affect the values of the performance indices the
                 evaluators are usually interested in, provided the
                 visit ratio to each node in the reduced (i.e.,
                 post-clustering) user behavior graph is equal to the
                 sum of the visit ratios the cluster's components had in
                 the original graph. Since the reduction we have just
                 described is equivalent to replacing each cluster with
                 one or more representatives of its components, and
                 since this is also the goal of applying clustering
                 techniques to the construction of executable workload
                 models substantially more compact than the original
                 workload to be modeled, this result shows that such
                 techniques are valid (i.e., produce accurate models)
                 when the assumptions and the conditions mentioned above
                 are satisfied. One condition which in practice is never
                 satisfied, however, is that the clustered commands are
                 characterized by exactly the same resource demands. In
                 fact, clustering algorithms are non-trivial just
                 because they have to recognize ``nearness'' among
                 commands with different characteristics, and group
                 those and only those commands whose resource demands
                 are sufficiently similar (where the notion of
                 similarity is to be defined by introducing that of
                 distance between two commands). Thus, the question of
                 the sensitivity of a workload model's accuracy to the
                 inevitable dispersion of the characteristics of a
                 cluster's components immediately arises. We know that,
                 if an adequate product-form model of an interactive
                 system can be built, if the users' behaviors can be
                 accurately modeled by probabilistic graphs, and if the
                 workload and the model of it to be constructed are
                 stationary, then a workload model in which all commands
                 with identical characteristics are grouped together and
                 modeled by a single representative is an accurate model
                 of the given workload (i.e., the model produces the
                 same values of the performance indices of interest as
                 the modeled workload when it is processed by a given
                 system). This is true, of course, provided the visit
                 ratios of the workload model's components equal the
                 sums of those of the corresponding workload components.
                 If we now apply a clustering algorithm to the given
                 workload, thereby obtaining clusters of similar, but
                 not identical, commands, and we build a workload model
                 by assembling cluster representatives (usually one per
                 cluster, for instance with demands corresponding to
                 those of the cluster's center of mass), by how much
                 will the values of the performance indices produced by
                 the workload model running on the given system differ
                 from those produced by the workload to be modeled? As
                 with several other problems, this could be attacked by
                 a mathematical approach or by an experimental one.
                 While a successful mathematical analysis of the
                 sensitivity of the major indices to the dispersion in
                 the resource demands of the commands being clustered
                 together would provide more general results, it would
                 also be likely to require the introduction of
                 simplifying assumptions (for example, having to do with
                 the distributions of the resource demands in a cluster
                 around its center of mass) whose validity would be
                 neither self-evident nor easy to verify experimentally.
                 On the other hand, an experimental approach achieves
                 results which, strictly speaking, are only applicable
                 to the cases considered in the experiments.
                 Extrapolations to other systems, other workloads, other
                 environments usually require faith, along with
                 experience, common sense, and familiarity with real
                 systems and workloads. This inherent lack of generality
                 is somehow counterbalanced, however, by the higher
                 degree of realism that is achievable with an
                 experimental investigation. In particular, when in a
                 study the properties of workloads are to play a crucial
                 role (there are very few studies indeed in which this
                  is not the case!), using a mathematical approach is
                  bound to raise questions about such properties that
                  are either very difficult or impossible to answer.
                 Primarily for this reason, and knowing very well the
                 limitations in the applicability of the results we
                 would obtain, we decided to adopt an experimental
                 approach. Since the question we were confronted with
                 had never been answered before (nor, to our knowledge,
                 had it been asked), we felt that our choice was
                 justified by the exploratory nature of the study. If
                 the resulting sensitivity were to turn out to be high,
                 we could conclude that not even under the above
                 assumptions can clustering techniques be trusted to
                 provide reasonable accuracy in all cases and hence
                 should not be used, or used with caution in those cases
                  (if they exist) in which their accuracy might be
                  acceptable. If, on the other hand, the sensitivity were low,
                 then we could say that, in at least one practical case,
                 clustering techniques would have been shown to work
                 adequately (of course, under all the other assumptions
                 listed above). The rationale of this investigation
                 might be questioned by asking why it would not be more
                 convenient to test the validity of clustering
                 techniques directly, that is, by comparing the
                 performance indices produced by a real workload to
                 those produced by an executable model (artificial
                 workload) built according to a clustering technique.
                 Our answer is that, in this study as well as in
                 [Ferr84], we are more interested in understanding the
                 limitations and the implications of clustering and
                 other workload model design methods than in evaluating
                 the accuracy of clustering in a particular case. In
                  other words, we are not so much interested in finding out
                 whether the errors due to clustering are of the order
                 of 10\% or of 80\%, but we want to be able to
                 understand why they are only 10\% or as large as 80\%,
                 respectively. Thus, we need to decompose the total
                 error into the contributions to it of the various
                 discrepancies that any real situation exhibits with
                 respect to the ideal one. This paper describes a study
                 primarily performed to assess the magnitude of one such
                 contribution, that of the dispersion of the resource
                 demands of clustered commands. An experimental
                  approach, in the case being considered here, requires
                 first of all that a workload for the experiment be
                 selected. Then, that workload is to be measured, in
                 order to obtain the values of the parameters defined by
                 the desired characterization. Next, an executable
                 workload model is to be built by applying a clustering
                 technique to the real workload selected. Then, the
                 workload and its model are to be run on the same
                 system, so that the model's accuracy can be evaluated
                 by comparing the performance indices produced by them.
                  As our study aims to isolate the sensitivity of
                 that accuracy to the differences in demands among the
                 commands that have been grouped into the same cluster,
                 these differences must be made the only source of
                 inaccuracies in the performance produced by the model.
                 To isolate this contribution to the error from all of
                 the others, the latter sources should be eliminated.
                 Finally, the experiment is to be carried out, and its
                 results interpreted. The results show that, on the
                 whole, the clustering method for workload model design
                 is reasonably accurate in the context of the case
                 examined in our study. The sensitivities we found were
                 reasonably low. Thus, we can state that, in at least
                 one practical case and under the assumptions discussed
                 in this paper, clustering techniques for executable
                 workload model design have been shown to work well.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
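
%%% The abstract leaves the clustering algorithm unspecified; one
%%% common concrete choice (an assumption here, not prescribed by
%%% the paper) is k-means over per-command resource-demand vectors,
%%% with each cluster's center of mass as its representative.
%%%
%%%     import random
%%%
%%%     def kmeans(points, k, iters=50):
%%%         # points: resource-demand vectors, e.g.
%%%         # [cpu_seconds, io_count] per command.
%%%         centers = random.sample(points, k)
%%%         for _ in range(iters):
%%%             groups = [[] for _ in range(k)]
%%%             for p in points:
%%%                 j = min(range(k),
%%%                         key=lambda c: sum((a - b) ** 2
%%%                                           for a, b in
%%%                                           zip(p, centers[c])))
%%%                 groups[j].append(p)
%%%             centers = [[sum(col) / len(g) for col in zip(*g)]
%%%                        if g else centers[j]
%%%                        for j, g in enumerate(groups)]
%%%         return centers, groups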

@Article{Raghavan:1985:CIU,
  author =       "S. V. Raghavan and R. Kalyanakrishnan",
  title =        "On the classification of interactive user behaviour
                 indices",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "40--48",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317809",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The concepts of user behaviour entropy and user
                 behaviour mobility are proposed as indices for the
                 description of user behaviour. The user behaviour
                 indices are derivable from the mode probability vector
                 and the mode transition matrix which adequately
                 describe the behaviour dynamics of an interactive user.
                  The user behaviour indices reduce the
                  $ (n^2 + n) $-dimensional parameter space to two
                  dimensions only for
                 classification, without loss of information related to
                 the user behaviour dynamics. The classification of the
                 users in an interactive educational environment using
                 the user behaviour indices is presented as a case
                 study.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
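
%%% The abstract does not give the exact index definitions; the
%%% Python sketch below shows one natural reading, labeled as an
%%% assumption: entropy of the mode probability vector, and mobility
%%% as the steady-state probability of changing mode.
%%%
%%%     import math
%%%
%%%     def behaviour_indices(p, P):
%%%         # p: mode probability vector (length n); P: n x n mode
%%%         # transition matrix.  Collapses (n*n)+n parameters to 2.
%%%         entropy = -sum(pi * math.log2(pi) for pi in p if pi > 0)
%%%         mobility = sum(pi * (1.0 - P[i][i])
%%%                        for i, pi in enumerate(p))
%%%         return entropy, mobility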

@Article{Verkamo:1985:ERL,
  author =       "A. Inkeri Verkamo",
  title =        "Empirical results on locality in database
                 referencing",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "49--58",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317810",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Database referencing behaviour is analyzed with
                 respect to locality features. The analysis is based on
                 database reference strings collected from several runs
                 of typical batch programs accessing a real database.
                 Locality of reference is measured by the stack distance
                 probability distribution, the number of block faults,
                 and a locality measure based on the memory reservation
                 size. In all the experiments, locality of reference is
                 observed, but it is found to be weaker than in code
                 referencing or even in some previous studies on
                 database referencing. The phase/transition concept used
                 in virtual memory systems is not well applicable to
                 database referencing, since a large part of the
                 locality set is constantly changing. The disruption of
                 the phases is predominantly due to random referencing
                 of data blocks. The references to index blocks show
                 stronger locality. In some special cases, sequentiality
                 is observed in the use of the data blocks. In general,
                 neither replacement strategies developed for virtual
                 memory systems nor prefetching techniques seem adequate
                 for performance improvement of database referencing.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
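
%%% The stack distance measure used above is computable directly
%%% from a block-reference string; a Python sketch (the linear scan
%%% per reference is for clarity, not efficiency):
%%%
%%%     def stack_distances(refs):
%%%         # LRU stack distance of each reference: depth of the
%%%         # block in the LRU stack (1 = top), or None on a first
%%%         # ("cold") reference.  The histogram of the depths is
%%%         # the stack distance probability distribution.
%%%         stack, dists = [], []
%%%         for b in refs:
%%%             if b in stack:
%%%                 dists.append(stack.index(b) + 1)
%%%                 stack.remove(b)
%%%             else:
%%%                 dists.append(None)
%%%             stack.insert(0, b)   # b is now most recently used
%%%         return dists
%%%
%%%     assert stack_distances(list("abcab")) == [None, None, None, 3, 3]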

@Article{Khelalfa:1985:DCS,
  author =       "Halin M. Khelalfa and Anneliese K. von Mayrhauser",
  title =        "Degradable computer systems with dependent
                 subsystems",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "59--68",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317811",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "When building a model for degradable computer systems,
                 it is not sufficient to merely quantify reliability and
                 performance measures. These indices must be
                 mathematically sound if they are to be used to design
                 such systems in an optimal way. The paper presents an
                 analysis of design optimisation for degradable computer
                 systems and shows how this particular application leads
                  to a system model with interdependent subsystems. A
                 procedure is presented on how to solve the resulting
                 Markov model. Its computational complexity is compared
                  to another solution method and shown to be considerably
                  more efficient.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
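
%%% The paper's structured solution procedure is not reproduced
%%% here; a generic baseline for the underlying Markov model is to
%%% replace one balance equation with the normalization condition
%%% (a Python/NumPy sketch of that standard formulation):
%%%
%%%     import numpy as np
%%%
%%%     def steady_state(Q):
%%%         # Q: CTMC generator matrix (rows sum to zero).
%%%         # Solve pi Q = 0 subject to sum(pi) = 1.
%%%         n = Q.shape[0]
%%%         A = np.vstack([Q.T[:-1], np.ones(n)])
%%%         b = np.zeros(n)
%%%         b[-1] = 1.0
%%%         return np.linalg.solve(A, b)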

@Article{Chillarege:1985:ESW,
  author =       "Ram Chillarege and Ravishankar K. Lyer",
  title =        "The effect of system workload on error latency: an
                 experimental study",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "69--77",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317812",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper, a methodology for determining and
                 characterizing error latency is developed. The method
                 is based on real workload data, gathered by an
                 experiment instrumented on a VAX 11/780 during the
                 normal workload cycle of the installation. This is the
                 first attempt at jointly studying error latency and
                 workload variations in a full production system.
                 Distributions of error latency were generated by
                 simulating the occurrence of faults under varying
                 workload conditions. A family of error latency
                 distributions so generated illustrate that error
                 latency is not so much a function of when in time a
                 fault occurred but rather a function of the workload
                 that followed the failure. The study finds that the
                 mean error latency varies by a 1 to 8 (hours) ratio
                 between high and low workloads. The method is general
                 and can be applied to any system.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Gonsalves:1985:PCT,
  author =       "Timothy A. Gonsalves",
  title =        "Performance characteristics of two {Ethernets}: an
                 experimental study",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "78--86",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317813",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Local computer networks are increasing in popularity
                 for the interconnection of computers for a variety of
                 applications. One such network that has been
                 implemented on a large scale is the Ethernet. This
                 paper describes an experimental performance evaluation
                 of a 3 and a 10 Mb/s Ethernet. The effects of varying
                 packet length and transmission speed on throughput,
                 mean delay and delay distribution are quantified. The
                 protocols are seen to be fair and stable. These
                 measurements span the range from the region of high
                 performance of the CSMA/CD protocol to the upper limits
                 of its utility where performance is degraded. The
                 measurements are compared to the predictions of
                 existing analytical models. The correlation is found to
                 range from good to poor, with more sophisticated models
                 yielding better results than a simple one.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Chlamtac:1985:PIS,
  author =       "I. Chlamtac and M. Eisinger",
  title =        "Performance of integrated services (voice\slash data)
                 {CSMA\slash CD} networks",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "87--93",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317814",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider a voice/data integrated local area
                 communication system. Due to the high suitability of
                 CSMA/CD protocols for data communication and the
                  existence of real-time voice delay constraints, we
                 consider a hybrid TDM/CSMA/CD protocol. This model
                 fundamentally differs from the very well documented
                 voice/data integrated systems in point to point
                 networks in which both voice and data users are
                 assigned fixed duration time slots for transmission.
                 The TDM/CSMA/CD integrated system performance is
                 analysed and basic performance tradeoffs in the system
                 design are manifested.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Chlamtac:1985:AMH,
  author =       "I. Chlamtac and M. Eisinger",
  title =        "An analytic model of the hyperchannel network using
                 multiple channel architecture",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "94--104",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317815",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The HYPERchannel communication network configured
                 around one to four channels is considered. We develop a
                 queueing model which characterizes the network
                 performance as a function of the number of channels,
                 the channel load and the number of stations in the
                 network. The model is used to analyze the multichannel
                 system performance and to evaluate the effect of the
                 channel selection mechanism, as implemented by the
                 HYPERchannel station interface units, on the
                 performance. It is shown that the network bandwidth
                 utilization is directly related to the channel
                 selection process and that it varies with network
                 configuration and load. These observed relations are
                 especially significant since they are most pronounced
                  in networks with a small number of stations, the typical
                 configuration in the majority of operational
                 HYPERchannel networks.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Bleistein:1985:APM,
  author =       "Sandra Bleistein and Shin-Sun Cho and Robert T.
                 Goettge",
  title =        "Analytic performance model of the {U.S.} en route air
                 traffic control computer systems",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "105--115",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317816",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An analytic performance modeling case study of a
                 complex command and control computer system is
                 presented. A queueing network model of the system was
                 developed and validated. Features of the model found to
                 be critical to its accuracy were detailed software
                 models, general service time distributions, and models
                 of transient response time behavior. Response time
                 prediction accuracy of the model was validated to 20
                 percent for moderate device utilizations. The study
                 shows that analytic techniques can be successfully
                 applied to performance modeling of complex systems.
                 Prediction of response time percentile values and
                 modeling of transient effects are identified as two
                 areas where improved analytic techniques would enhance
                 performance engineering of such systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Dowdy:1985:AUM,
  author =       "Lawrence W. Dowdy and Manvinder S. Chopra",
  title =        "On the applicability of using multiprogramming level
                 distributions",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "116--127",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317817",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A computer system's workload is represented by its
                 multiprogramming level, which is defined as the number
                 of tasks (jobs, customers) which actively compete for
                 resources within the system. In a product-form queuing
                 network model of the system, the workload is modeled by
                 assuming that the multiprogramming level is either
                 fixed (i.e., closed model) or that the multiprogramming
                 level depends upon an outside arrival process (i.e.,
                 open model). However, in many actual systems, closed
                 and open models are both inappropriate since the
                 multiprogramming level is neither fixed nor governed by
                  an outside arrival process. In an actual system, the
                 multiprogramming level varies due to features such as
                 task spawning, killing, blocking, parallel processing,
                 and/or simultaneous resource possession. The
                 multiprogramming level is a random variable with an
                 associated distribution. This paper demonstrates that
                 improved models can result from using this
                 multiprogramming level distribution information.
                 Several examples relative to open versus closed models,
                 subsystem models, actual system models, and blocking
                 models are given which demonstrate the applicability of
                 using multiprogramming level distributions. This
                 applicability, shown via the examples, is the main
                 contribution of the paper. The examples also motivate
                 interesting theoretical results relating to open
                 models, closed models, and subsystem models.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "blocking; multiprogramming level distributions; open
                 and closed queuing networks; subsystem modeling",
}

@Article{Krzesinski:1985:MQN,
  author =       "A. E. Krzesinski and P. Teunissen",
  title =        "Multiclass queueing networks with population
                 constrainted subnetworks",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "128--139",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317818",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A Multiclass Queueing Network model (MQN) is
                 partitioned into a set of disjoint subnetworks.
                 Population constraints are applied to each subnetwork
                 such that within each subnetwork each population chain
                 is either subject to an individual population
                 constraint, or a group of chains may be subject to a
                 common (shared) population constraint. Such population
                 constraints are necessary in order to model
                 multiprogramming level constraints in mainframe
                 computer systems and window flow control mechanisms in
                 computer communication networks. A computationally
                 efficient approximate solution method is developed for
                 solving MQN's with population constraints. Each
                 subnetwork is reduced to a single approximately flow
                 equivalent composite centre by assuming that the effect
                 of other chains on a given chain can be adequately
                 represented by their average customer populations. The
                 accuracy of the population constraint approximation is
                 compared against previous techniques by applying it to
                 a set of test cases for which simulation solutions have
                 previously been reported. The accuracy of the
                 approximation technique is found to be good and in
                 general is an improvement over previously published
                 concurrency constraint approximations.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  keywords =     "approximate solution; mean value analysis; multiclass
                 queueing networks; product form solutions",
}

@Article{Branwajn:1985:NSI,
  author =       "Alexandre Branwajn and Yung-Li Lily Jow",
  title =        "A note on service interruptions",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "140--148",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317986",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This note is devoted to a few remarks on the
                 performance evaluation of systems with service
                 interruptions such as priority queues for lower
                 priority customers, systems subject to breakdowns, etc.
                 Recent work on priority queues has shown that a popular
                 approximation method, the ``reduced occupancy
                 approximation'', can be exceedingly inaccurate for a
                 range of parameter values. We identify a cause of
                 inaccuracy and, hence, propose a simple correction that
                 provides a substantial improvement in the results.
                 Using the example of a simple model with service
                 interruptions, we show also that conditional
                 probabilities can be of value in deriving recurrent
                 solutions to some problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
  xxnote =       "Check: author may be Brandwajn??",
}

@Article{Plateau:1985:SSP,
  author =       "Brigitte Plateau",
  title =        "On the stochastic structure of parallelism and
                 synchronization models for distributed algorithms",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "147--154",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317819",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper a new technique to handle complex Markov
                 models is presented. This method is based on a
                  description using stochastic automata and is dedicated
                  to the modelling of distributed algorithms. One example of a
                 mutual exclusion algorithm in a distributed environment
                 is extensively analysed. The mathematical analysis is
                 based on tensor algebra for matrices.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Snyder:1985:ANS,
  author =       "Patricia M. Snyder and William J. Stewart",
  title =        "An approximate numerical solution for multiclass
                 preemptive priority queues with general service time
                 distributions",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "155--165",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317820",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper an approximate numerical solution for a
                 multiclass preemptive priority single server queue is
                 developed. The arrival process of each class follows a
                 Poisson distribution. The service time distribution
                 must have a rational Laplace transform, but is
                 otherwise arbitrary and may be different for different
                 classes. The work reported here was motivated by a
                 desire to compute the equilibrium probability
                 distribution of networks containing preemptive priority
                 servers. Such networks are frequently encountered when
                 modeling computer systems, medical care delivery
                 systems and communication networks. We wish to use an
                 iterative technique which constructs a series of two
                 station networks consisting of one station from the
                 original network and one ``complementary'' station
                 whose behavior with respect to the original station
                 mimics that of the rest of the network. At each
                 iteration, it is necessary to compute the equilibrium
                 probability distribution of one or more preemptive
                 priority queues.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hevner:1985:EOD,
  author =       "Alan R. Hevner",
  title =        "Evaluation of optical disk systems for very large
                 database applications",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "166--172",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317821",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Optical Disk Systems have significant advantages over
                 conventional magnetic mass storage media for very large
                 database applications. Among other features, optical
                 disk systems offer large capacity and high transfer
                 rate. A critical problem is how to integrate the
                 optical disk system into a total application system
                 environment while maintaining the high performance
                 capabilities of the optical disk. In this paper the
                 performance of optical disk system configurations under
                 realistic application environments is analyzed via
                 queueing models. The results provide several important
                 guidelines for the use of optical disk systems on large
                 applications.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Houtekamer:1985:LDC,
  author =       "Gilbert E. Houtekamer",
  title =        "The local disk controller",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "173--182",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317822",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The performance of the I/O subsystem in the 370-XA
                 architecture has been improved considerably with the
                 introduction of the new channel subsystem, as compared
                 to the System/370 architecture. The emphasis in the
                 370-XA architecture is on reducing the CPU load
                 associated with I/O, and on reducing the congestion in
                 multi-CPU, shared systems, by redesigning the channel
                 system. In this paper we will show that a reallocation
                 of the control unit logic may triple the channel
                 subsystem's capacity, while still using the same disk
                 drives. The performance gain is achieved by adding
                 control-unit like intelligence and local buffer memory
                 to each disk drive, creating a Local Disk Controller
                 (LDC), and thus eliminating the performance degradation
                 caused by reconnect failures at a high channel
                 utilization. The system proposed remains fully software
                 compatible with the current 370-XA architecture. A
                 simpler approach, requiring only a slight modification
                 to the disk drives, is also discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Yu:1985:MCC,
  author =       "Philip S. Yu and Daniel M. Dias and John T. Robinson
                 and Balakrishna R. Iyer and Douglas Cornell",
  title =        "Modelling of centralized concurrency control in a
                 multi-system environment",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "183--191",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317795.317823",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The performance of multiple systems sharing a common
                 data base is analyzed for an architecture with
                 concurrency control using a centralized lock engine.
                 The workload is based on traces from large mainframe
                 systems running IBM's IMS database management system.
                  Based on IMS lock traces, the lock contention
                  probability and data base buffer invalidation effect in
                  a multi-system environment are predicted. Workload
                 parameters are generated for use in event-driven
                 simulation models that examine the overall performance
                 of multi-system data sharing, and to determine the
                 performance impact of various system parameters and
                 design alternatives. While performance results are
                 presented for realistic system parameters, the emphasis
                 is on the methodology, approximate analysis technique
                 and on examining the factors that affect multi-system
                 performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Thomasian:1985:ASO,
  author =       "Alexander Thomasian and In Kyung Ryu",
  title =        "Analysis of some optimistic concurrency control
                 schemes based on certification",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "2",
  pages =        "192--203",
  month =        aug,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317786.317824",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:01:51 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Optimistic Concurrency Control-OCC schemes based on
                 certification are analyzed in this paper. We allow two
                 types of data access schemes referred to as static and
                 dynamic. According to the first (second) scheme a
                 transaction reads all the required data items at the
                 beginning of its processing (on demand during its
                 processing), respectively. After completing its
                 processing, each transaction is checked as to whether
                 it has encountered a data conflict. Validated
                 transactions commit; otherwise, they are restarted. A
                 variant of the regular (silent) commit scheme where a
                 committing transaction notifies conflicted transactions
                 to restart immediately (broadcast commit scheme) is
                 also considered. We use an iterative method to analyze
                 the performance of OCC schemes in the framework of a
                 system with a fixed number of transactions in multiple
                 classes with given probabilities for their occurrence.
                 The iterative method is validated against simulation
                 and shown to be highly accurate even for high data
                 contention. We present graphs/tables, which are used to
                 determine how system performance is affected by: (i)
                 various OCC schemes, (ii) transaction size, i.e.,
                 number of data items accessed, (iii) number of
                 transactions, (iv) the distribution of transaction
                 processing time requirements, (v) the throughput
                 characteristic of the system, and (vi) granule
                 placement.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ryu:1985:RPA,
  author =       "In Kyung Ryu",
  title =        "Review of {'OS 1100-of performance algorithms: a guide
                 to the resource allocation algorithms of OS-1100'} by
                 {John C. Kelly}",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "3--4",
  pages =        "9--9",
  month =        nov,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041844.1041845",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "The book describes the algorithms which were used by
                 OS-1100 to manage the resources of Sperry 1100 computer
                 systems, and lists the parameters that may affect the
                  performance of OS-1100. However, the book fails to show
                  the reader how the algorithms and the parameters affect
                  the performance of OS-1100. It is not clear to the
                  reader why the algorithms in OS-1100 were selected or
                  how to tune the parameters.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Perros:1985:AMF,
  author =       "H. G. Perros and D. Mirchandani",
  title =        "An analytic model of a file server for bulk file
                 transfers",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "3--4",
  pages =        "14--22",
  month =        nov,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041844.1041846",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "An analytic model of a file server is presented. The
                 file server was an experimental system designed to
                 provide an environment for storage and retrieval of
                 bulk files. The file server was envisaged to be
                 accessed by single-user workstations, equipped with
                 limited secondary storage, via a local area network.
                 The analytic model is a hierarchical model involving an
                 open/closed queueing network of the BCMP type and an
                  open queueing network with blocking. These two models
                  were combined by means of an iterative scheme. The
                  results obtained from the
                 analytic model were in close agreement with simulation
                 data.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Domanski:1985:BIS,
  author =       "Bernard Domanski",
  title =        "Building {IMS} synthetic workloads",
  journal =      j-SIGMETRICS,
  volume =       "13",
  number =       "3--4",
  pages =        "23--28",
  month =        nov,
  year =         "1985",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/1041844.1041847",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:50 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Historically, workload characterization, and cluster
                 analysis in particular, has been a proven technique
                 when applied to performance evaluation / capacity
                 planning studies. Given the problem of constructing a
                 synthetic workload that represents a production
                 workload, our goal is to use this technique to identify
                 a {\em concise}, yet accurate set of work units that
                 will compose the workload. For IMS, these work units
                 are transactions. Yet the selection of transactions
                 must be done with care; for an additional goal must be
                 to identify a {\em concise}, yet accurate set of
                 databases that are required by the transactions. This
                 paper will review clustering techniques, and apply them
                 to drive the transaction selection process. An
                 algorithm is also presented that identifies the
                 technique behind database selection. A case study
                 follows that illustrates the implementation of the
                 methodology.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Buzen:1986:MST,
  author =       "Jeffrey P. Buzen",
  title =        "Modeling {I/O} subsystems (tutorial)",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "1--1",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317499.317532",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This tutorial will present techniques for modeling the
                 performance of I/O subsystems that incorporate
                 channels, control units, string controllers and direct
                 access devices. The presentation will focus on the
                 general principles involved in analyses of this type,
                 and will explore the strengths and weaknesses of
                 alternative assumptions. Attendees should gain an
                 overall understanding of basic analysis procedures so
                 they can deal with alternative I/O architectures that
                 are not treated explicitly in the presentation. The
                 material in this tutorial is mathematically oriented,
                 and attendees should have some familiarity with basic
                 queueing theory. However, the presentation is almost
                 entirely self contained, and all important concepts and
                 equations will be fully explained. Operational analysis
                 will be used throughout to simplify the derivation of
                 major results and clarify the assumptions required at
                 each stage.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Ferrari:1986:WCT,
  author =       "Domenico Ferrari",
  title =        "Workload characterization (tutorial): issues and
                 approaches",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "1--1",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317499.317900",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Workload characterization is that branch of
                 performance evaluation which concerns itself with the
                 measurement and modeling of the workloads to be
                 processed by the system being evaluated. Since all
                 performance indices of interest are workload-dependent,
                 there is no evaluation study that does not require the
                 characterization of one or more workloads. In spite of
                 the importance of the problem, our knowledge in this
                 area leaves much to be desired. The tutorial addresses
                 the main issues, both resolved and unresolved, in the
                 field, and surveys the major approaches that have been
                 proposed and are in use. Modern methods for designing
                 executable artificial workloads, as well as the
                 applications of these techniques in system procurement,
                 system tuning, and capacity planning are emphasized.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Goel:1986:SRM,
  author =       "Amrit L. Goel",
  title =        "Software reliability modeling (tutorial)",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "2--2",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317901",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "There are a number of views as to what software
                 reliability is and how it should be quantified. Some
                 people believe that this measure should be binary in
                 nature so that an imperfect program would have zero
                 reliability while a perfect one would have a
                 reliability value of one. This view parallels that of
                 program proving whereby the program is either correct
                 or incorrect. Others, however, feel that software
                 reliability should be defined as the relative frequency
                 of the times that the program works as intended by the
                 user. This view is similar to that taken in testing
                  where a percentage of the successful cases is used as a
                 measure of program quality. According to the latter
                 viewpoint, software reliability is a probabilistic
                 measure and can be defined as follows: Let $F$ be a
                 class of faults, defined arbitrarily, and $T$ be a
                 measure of relevant time, the units of which are
                 dictated by the application at hand. Then the
                 reliability of the software package with respect to the
                 class of faults $F$ and with respect to the metric $T$,
                 is the probability that no fault of the class occurs
                 during the execution of the program for a prespecified
                 period of relevant time. A number of models have been
                 proposed during the past fifteen years to estimate
                 software reliability and several other performance
                 measures. These are based mainly on the failure history
                 of software and can be classified according to the
                 nature of the failure process studied as indicated
                 below. Times Between Failures Models: In this class of
                 models the process under study is the time between
                 failures. The most common approach is to assume that
                 the time between, say, the $ (i - 1)$ st and $i$ th
                 failures, follows a distribution whose parameters
                 depend on the number of faults remaining in the program
                 during this interval. Failure Count Models: The
                 interest of this class of models is in the number of
                 faults or failures in specified time intervals rather
                 than in times between failures. The failure counts are
                 assumed to follow a known stochastic process with a
                 time dependent discrete or continuous failure rate.
                 Fault Seeding Models: The basic approach in this class
                 of models is to ``seed'' a known number of faults in a
                 program which is assumed to have an unknown number of
                 indigenous faults. Input Domain Based Models: The basic
                 approach taken here is to generate a set of test cases
                 from an input distribution which is assumed to be
                 representative of the operational usage of the program.
                 Because of the difficulty in obtaining this
                 distribution, the input domain is partitioned into a
                 set of equivalence classes, each of which is usually
                 associated with a program path. In this tutorial we
                 discuss the key models from the above classes and the
                 related issues of parametric estimation, unification of
                 models, Bayesian interpretation, validation and
                 comparison of models, and determination of optimum
                 release time.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Hedlund:1986:PMI,
  author =       "Kye Hedlund",
  title =        "Performance modeling in integrated curcuit design
                 (tutorial)",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "2--2",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317499.317902",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This tutorial is an introduction to performance
                 modeling in the design of integrated circuits (ICs). It
                 assumes no background in either electrical engineering
                 or VLSI design; all relevant concepts and terminology
                 will be introduced. The goal is to give an overview of
                 the role of performance modeling in IC design, the
                 current state of the art, central problems and research
                 challenges. First, the process of IC design will be
                 reviewed. Every design progresses through a series of
                 stages: concept, architecture, implementation and
                 realization. Each level of design manipulates different
                 abstractions and hence is concerned with different
                  measures of design quality. Some principal measures
                 are: speed, silicon area, power consumption and the
                 number of input/output connections. There are several
                 different major design paradigms such as gate array,
                 standard cell and custom design. Each results in
                 different tradeoffs between flexibility, ease of
                 implementation and design quality. This has a
                 fundamental impact on both the design process and the
                 resulting design. Performance considerations enter into
                 IC design at a variety of levels: device, circuit,
                 logic design and architecture. Each requires different
                 performance models, and the designer must make
                 tradeoffs that are qualitatively different at different
                 levels. Circuit level design requires fast and accurate
                 models of logic gate behavior. A circuit's speed,
                 silicon area and power consumption must be accurately
                 estimated. Each of these circuit characteristics can be
                 traded off against the others, and the designer may
                 adjust the tradeoff in order to tune the circuit to the
                 needs of a particular application. Accurate and
                 computationally fast models form the basis for the
                 tools that assist the designer in circuit optimization.
                 Tools exist that accurately predict circuit performance
                 and that automatically optimize circuits. Integrated
                 circuit design is a field still in its infancy. This,
                 coupled with the fact that the underlying technological
                 base has undergone rapid change in recent years, means
                 that performance modeling of IC design is still in its
                 formative stages. Some areas (e.g. device modeling) are
                 more mature and better understood than others (e.g.
                 architectural modeling). Research opportunities are
                 plentiful.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Artis:1986:ESP,
  author =       "H. Pat Artis",
  title =        "Expert systems for performance analysis (tutorial)",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "3--3",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317903",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A great portion of the formal practice called computer
                 performance evaluation is the application of rules of
                 thumb and proceduralized analysis of model results,
                 specific reports, and data elements based on the
                 experience and knowledge of the practitioner. Expert
                 systems provide a technique to support the analyst in
                 such mundane analyses and allow them to study more
                 complex problems that cannot easily be proceduralized.
                  Rather than replacing performance analysts, expert
                 systems provide an opportunity to increase their
                 productivity. The tutorial focuses on a discussion of
                 the fundamental building blocks of expert systems:
                 vocabularies, rules, and policies. A familiar example
                 is used to illustrate using expert systems for analysis
                 of performance results.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Tripathi:1986:PIL,
  author =       "Satish K. Tripathi",
  title =        "Performance issues in local area networks (tutorial)",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "3--3",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317499.317904",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This tutorial addresses performance problems in Local
                 Area Networks (LAN). User level performance measures
                  are affected both by the software and by communication
                  bottlenecks. Techniques for modeling the
                 key components of the performance of a LAN will be
                 presented. Models will be presented to discuss the
                 throughput and response time characteristics of LANs.
                 We also present some measurement data obtained from a
                 LAN performance experiment.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Stone:1986:FC,
  author =       "Harold S. Stone and Dominique Thibaut",
  title =        "Footprints in the cache",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "4--8",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317533",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "This paper develops an analytical model for a
                 cache-reload transient. When an interrupt program or
                 system program runs periodically in a cache-based
                 computer, a short cache-reload transient occurs each
                 time the interrupt program is invoked. That transient
                 depends on the size of the cache, the fraction of the
                 cache used by the interrupt program, and the fraction
                 of the cache used by background programs that run
                 between interrupts. We call the portion of a cache used
                 by a program its footprint in the cache, and we show
                 that the reload transient is related to the area in the
                 tail of a normal distribution whose mean is a function
                 of the footprints of the programs that compete for the
                 cache. We believe that the model may be useful as well
                 for predicting paging behavior in virtual-memory
                 systems with round-robin scheduling.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Vernon:1986:PAM,
  author =       "Mary K. Vernon and Mark A. Holliday",
  title =        "Performance analysis of multiprocessor cache
                 consistency protocols using generalized timed {Petri}
                 nets",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "9--17",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317499.317534",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We use an exact analytical technique, based on
                 Generalized Timed Petri Nets (GTPNs), to study the
                 performance of shared bus cache consistency protocols
                 for multiprocessors. We develop a general framework
                 within which the key characteristics of the Write-Once
                 protocol and four enhancements that have been combined
                 in various ways in the literature can be identified and
                 evaluated. We then quantitatively assess the
                 performance gains for each of the four enhancements. We
                 consider three levels of data sharing in our workload
                 models. One of the enhancements substantially improves
                 system performance in all cases. Two enhancements are
                 shown to have negligible effect over the range of
                 workloads analyzed. The fourth enhancement shows a
                 small improvement for low levels of sharing, but shows
                 more substantial improvement as sharing is increased,
                 if we assume a ``good access pattern''. The effects of
                 two architectural parameters, the blocksize and the
                 main memory cycle time are also considered.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Harrison:1986:PMP,
  author =       "P. G. Harrison and A. J. Field",
  title =        "Performance modelling of parallel computer
                 architectures",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "18--27",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317535",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "In this paper we describe two types of complex server
                 aggregations which can be used to model collections of
                 components in certain types of parallel computer
                 systems and give a case study showing how the
                 aggregations may be applied in practice. Analytical
                 models of such systems are becoming increasingly
                 important as a means of guiding the often complex
                 design processes, particularly since recent
                 developments in VLSI technology now make it possible to
                 fabricate many paper-designs hitherto impractical for
                 reasons of cost. We argue that aggregations of the type
                 described are essential in the modelling of parallel
                 systems; using the proposed techniques, large numbers
                 of components can be modelled as queue-length-dependent
                 servers within a queueing network in which the number
                 of servers is the same as the number of distinct types
                 of processing element in the system being modelled.
                  Because the number of servers in the model is fixed,
                  i.e., independent of the number of processors, very large
                 multiprocessor systems can be modelled efficiently with
                 no explosion in the size of the state space.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Madnick:1986:MMC,
  author =       "Stuart Madnick and Y. Richard Wang",
  title =        "Modeling multiprocessor computer systems with
                 unbalanced flows",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "28--34",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317499.317536",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A performance analysis methodology using certain
                 aspects of queueing theory to evaluate computer system
                 speed performance is presented. This methodology
                 specifically focuses on modeling multiprocessor
                 computer systems with unbalanced flows (i.e., number of
                 transactions leaving a server is not the same as number
                 of transactions entering that server) due to
                 asynchronously spawned parallel tasks. This unbalanced
                 flow phenomenon, which has a significant effect on
                 performance, cannot be solved analytically by classical
                 queueing network models. A decomposition method is
                 applied to decompose the unbalanced flows. Formulae for
                 open queueing networks with unbalanced flows due to
                 asynchronously spawned tasks are developed.
                 Furthermore, an algorithm based on Buzen's convolution
                 algorithm is developed to test the necessary and
                 sufficient condition for closed system stability as
                 well as to compute performance measures. An average of
                 less than four iterations is reported for convergence
                 with this algorithm. A Study of the INFOPLEX
                 multiprocessor data storage hierarchy, comparing this
                 rapid solution algorithm with simulations, has shown
                 highly consistent results. A cost effective software
                 tool, using this methodology, has been developed to
                 analyze an architectural design, such as INFOPLEX, and
                 to produce measures such as throughput, utilization,
                 and response time so that potential performance
                 problems can be identified.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}

@Article{Kleeman:1986:APB,
  author =       "Lindsay Kleeman and Antonio Cantoni",
  title =        "The analysis and performance of batching arbiters",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "35--43",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317537",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "A class of arbiters, known as batching arbiters, is
                 introduced and defined. A particularly simple
                 decentralised example of a batching arbiter is
                 described, with motivation given for the batching
                 arbiter model adopted. It is shown that under
                 reasonable assumptions, batching arbiters can be
                  described by a finite-state Markov chain. The key
                  steps in the analysis of arbiter performance are the
                  assignment of states, the evaluation of state
                  transition probabilities, and the proof that the
                  Markov chain is irreducible. Arbiter performance
                  parameters are defined, such as the proportion of time
                  allocated to each requester and the mean waiting time
                  of each requester. Apart from results describing the
                  steady-state behavior of the arbiter for general system
                 parameters, a number of limiting results are also
                 obtained corresponding to light and heavy request
                 loading. Finally, numerical results of practical
                 interest are presented, showing the performance
                 parameters of the arbiter versus request rates for
                 various configurations.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
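
%%% The analysis above reduces a batching arbiter to a finite,
%%% irreducible Markov chain whose stationary distribution yields the
%%% performance parameters.  A minimal Python sketch of that central
%%% step, assuming a made-up 3-state transition matrix P rather than
%%% the arbiter's actual chain:
%%%
%%%     import numpy as np
%%%
%%%     P = np.array([[0.5, 0.3, 0.2],
%%%                   [0.1, 0.6, 0.3],
%%%                   [0.2, 0.2, 0.6]])
%%%
%%%     n = P.shape[0]
%%%     # Solve pi P = pi subject to sum(pi) = 1 by replacing one
%%%     # balance equation with the normalization constraint.
%%%     A = np.vstack([(P.T - np.eye(n))[:-1], np.ones(n)])
%%%     b = np.zeros(n)
%%%     b[-1] = 1.0
%%%     pi = np.linalg.solve(A, b)   # long-run fraction of time per state
%%%
%%% Quantities such as the proportion of time allocated to each
%%% requester then follow by summing pi over the corresponding states.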

@Article{Lehoczky:1986:PRT,
  author =       "John P. Lehoczky and Lui Sha",
  title =        "Performance of real-time bus scheduling algorithms",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "44--53",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317499.317538",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "When periodic tasks with hard deadlines communicate
                 over a bus, the problem of hard real-time bus
                  scheduling arises. This paper addresses several
                  aspects of that problem, including the evaluation of
                  scheduling algorithms and the issues of message packet
                  pacing, preemption, priority granularity, and
                  buffering.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
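
%%% For a flavor of the schedulability analysis behind such results,
%%% here is a hedged Python sketch of the classical Liu--Layland
%%% utilization bound for rate-monotonic scheduling of periodic
%%% hard-deadline tasks; the paper treats the bus analogue (pacing,
%%% preemption, priority granularity), and the message set below is
%%% hypothetical.
%%%
%%%     def rm_utilization_test(tasks):
%%%         """tasks: list of (C, T) pairs; sufficient, not necessary."""
%%%         n = len(tasks)
%%%         u = sum(c / t for c, t in tasks)
%%%         return u <= n * (2 ** (1.0 / n) - 1)
%%%
%%%     messages = [(1, 4), (2, 8), (1, 10)]  # (transmission time, period)
%%%     print(rm_utilization_test(messages))  # True: U = 0.6 <= 0.780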

@Article{Leland:1986:LBH,
  author =       "Will Leland and Teunis J. Ott",
  title =        "Load-balancing heuristics and process behavior",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "54--69",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317539",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Dynamic load balancing in a system of loosely-coupled
                 homogeneous processors may employ both judicious
                 initial placement of processes and migration of
                 existing processes to processors with fewer resident
                 processes. In order to predict the possible benefits of
                 these dynamic assignment techniques, we analyzed the
                 behavior (CPU, disk, and memory use) of 9.5 million
                  Unix processes during normal use. The observed process
                 behavior was then used to drive simulation studies of
                 particular dynamic assignment heuristics.\par

                 Let $ F(\cdot) $ be the probability distribution of the
                 amount of CPU time used by an arbitrary process. In the
                 environment studied we found:\par

                  $ \bullet $ $ (1 - F(x)) \approx r x^{-c} $, $ 1.05 <
                  c < 1.25 $;\par

                 $ \bullet $ $ F(\cdot) $ is far enough from exponential
                  to make exponential models of little use;\par

                 $ \bullet $ With a foreground-background process
                 scheduling policy in each processor, simple heuristics
                  for initial placement and process migration can
                 significantly improve the response ratios of processes
                 that demand exceptional amounts of CPU, without harming
                 the response ratios of ordinary processes.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
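
%%% The tail law reported above, $ (1 - F(x)) \approx r x^{-c} $ with
%%% 1.05 < c < 1.25, is a Pareto-type tail and can be sampled by
%%% inverse transform.  A short Python sketch, where the scale x_min
%%% and the chosen exponent are illustrative assumptions:
%%%
%%%     import random
%%%
%%%     def pareto_cpu_time(c=1.1, x_min=1.0):
%%%         # P(X > x) = (x / x_min) ** (-c) for x >= x_min
%%%         return x_min * random.random() ** (-1.0 / c)
%%%
%%%     samples = [pareto_cpu_time() for _ in range(100_000)]
%%%     heavy = sum(1 for s in samples if s > 100) / len(samples)
%%%
%%% With c < 2 the variance is infinite and, for c near 1, the mean is
%%% barely finite, which is why exponential models are of little use
%%% for such workloads.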

@Article{Lee:1986:CPB,
  author =       "Kyoo Jeong Lee and Don Towsley",
  title =        "A comparison of priority-based decentralized load
                 balancing policies",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "70--77",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317540",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "Load balancing policies in distributed systems divide
                  jobs into two classes: those processed at their site
                  of origination (local jobs) and those processed at some
                 other site in the system after being transferred
                 through a communication network (remote jobs). This
                 paper considers a class of decentralized load balancing
                 policies that use a threshold on the local job queue
                 length at each host in making decisions for remote
                 processing. They differ from each other according to
                 how they assign priorities to each of these job
                 classes, ranging from one providing favorable treatment
                 to local jobs to one providing favorable treatment to
                  remote jobs. Under each policy, the load balancing
                  problem is formulated as an optimization with respect
                  to the threshold parameter. The optimal threshold is
                  obtained numerically using a matrix-geometric
                  formulation and an iteration method. Finally, we
                  consider the effects that the job arrival process can
                  have on performance. One expects load balancing to be
                  more beneficial in an environment of bursty job
                  arrivals than in one of random job arrivals; this
                  expectation is borne out by the numerical examples.",
  acknowledgement = ack-nhfb,
  ajournal =     "Perform. Eval. Rev.",
  fjournal =     "ACM SIGMETRICS Performance Evaluation Review",
  journal-URL =  "https://dl.acm.org/loi/sigmetrics",
}
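
%%% A hedged Python sketch of the threshold rule shared by the
%%% policies compared above: an arriving job is served locally unless
%%% the local queue has reached the threshold T, in which case it is
%%% shipped to the other host.  Two symmetric hosts are simulated by
%%% uniformizing the underlying Markov chain; transfer delay and the
%%% paper's local/remote priority rules are deliberately omitted, and
%%% all rates are illustrative.
%%%
%%%     import random
%%%
%%%     def mean_response(T, lam=0.8, mu=1.0, steps=1_000_000):
%%%         q = [0, 0]                # local queue lengths
%%%         area = 0.0                # time integral of total queue
%%%         rate = 2 * lam + 2 * mu   # uniformization constant
%%%         for _ in range(steps):
%%%             area += sum(q) / rate # each step lasts 1/rate on average
%%%             r = random.random() * rate
%%%             if r < 2 * lam:       # arrival at host i
%%%                 i = 0 if r < lam else 1
%%%                 if q[i] >= T:     # threshold rule: ship remote
%%%                     i = 1 - i
%%%                 q[i] += 1
%%%             else:                 # (possibly fictitious) departure
%%%                 i = 0 if r < 2 * lam + mu else 1
%%%                 q[i] = max(0, q[i] - 1)
%%%         return (area / (steps / rate)) / (2 * lam)   # Little's law
%%%
%%%     for T in (1, 2, 4, 8):
%%%         print(T, mean_response(T))
%%%
%%% Sweeping T mirrors, in toy form, the optimization over the
%%% threshold parameter that the paper solves via its matrix-geometric
%%% formulation.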

@Article{LeBoudec:1986:BEM,
  author =       "Jean-Yves {Le Boudec}",
  title =        "A {BCMP} extension to multiserver stations with
                 concurrent classes of customers",
  journal =      j-SIGMETRICS,
  volume =       "14",
  number =       "1",
  pages =        "78--91",
  month =        may,
  year =         "1986",
  CODEN =        "????",
  DOI =          "https://doi.org/10.1145/317531.317541",
  ISSN =         "0163-5999 (print), 1557-9484 (electronic)",
  bibdate =      "Thu Jun 26 11:02:55 MDT 2008",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/sigmetrics.bib",
  abstract =     "We consider a multiclass service station with $B$
                 identical exponential servers, with constant service
                 rate $ \mu $. At