%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "1.83",
%%%     date            = "07 June 2024",
%%%     time            = "09:14:45 MDT",
%%%     filename        = "tosem.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "https://www.math.utah.edu/~beebe",
%%%     checksum        = "34616 38931 196713 1914043",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography; BibTeX; ACM Transactions on
%%%                        Software Engineering and Methodology",
%%%     license         = "public domain",
%%%     supported       = "no",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        the journal ACM Transactions on Software
%%%                        Engineering and Methodology (CODEN ATSMER,
%%%                        ISSN 1049-331X (print), 1557-7392
%%%                        (electronic)), for 1992--date.
%%%
%%%                        Publication began with volume 1, number 1, in
%%%                        January 1992.  The journal appears quarterly.
%%%
%%%                        The journal has a World-Wide Web site
%%%                        at:
%%%
%%%                            http://www.acm.org/pubs/tosem
%%%
%%%                        Tables-of-contents of all issues are
%%%                        available at:
%%%
%%%                            http://www.acm.org/pubs/contents/journals/tosem/
%%%                            https://dl.acm.org/loi/tosem
%%%
%%%                        Qualified subscribers can retrieve the full
%%%                        text of recent articles in PDF form.
%%%
%%%                        At version 1.83, the COMPLETE journal
%%%                        coverage looked like this:
%%%
%%%                             1992 (  15)    2003 (  14)    2014 (  47)
%%%                             1993 (  13)    2004 (  10)    2015 (  24)
%%%                             1994 (  12)    2005 (  15)    2016 (  17)
%%%                             1995 (  12)    2006 (  12)    2017 (  13)
%%%                             1996 (  14)    2007 (  19)    2018 (  25)
%%%                             1997 (  13)    2008 (  23)    2019 (  28)
%%%                             1998 (  14)    2009 (  14)    2020 (  33)
%%%                             1999 (  13)    2010 (  14)    2021 (  57)
%%%                             2000 (  14)    2011 (  18)    2022 (  86)
%%%                             2001 (  11)    2012 (  19)    2023 ( 161)
%%%                             2002 (  15)    2013 (  38)    2024 ( 140)
%%%
%%%                             Article:        973
%%%
%%%                             Total entries:  973
%%%
%%%                        The initial draft of this bibliography was
%%%                        derived from data at the ACM Web site.
%%%
%%%                        ACM copyrights explicitly permit abstracting
%%%                        with credit, so article abstracts, keywords,
%%%                        and subject classifications have been
%%%                        included in this bibliography wherever
%%%                        available.
%%%
%%%                        The bibsource keys in the bibliography
%%%                        entries below indicate the data sources.
%%%
%%%                        URL keys in the bibliography point to
%%%                        World Wide Web locations of additional
%%%                        information about the entry.
%%%
%%%                        Spelling has been verified with the UNIX
%%%                        spell and GNU ispell programs using the
%%%                        exception dictionary stored in the
%%%                        companion file with extension .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen
%%%                        as name:year:abbrev, where name is the
%%%                        family name of the first author or editor,
%%%                        year is a 4-digit number, and abbrev is a
%%%                        3-letter condensation of important title
%%%                        words. Citation tags were automatically
%%%                        generated by software developed for the
%%%                        BibNet Project.
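%%%
%%%                        For example, the first entry below carries
%%%                        the tag Offutt:1992:IST: family name Offutt,
%%%                        year 1992, and the abbreviation IST taken
%%%                        from the title words ``Investigations'',
%%%                        ``Software'', and ``Testing''.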
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, using ``bibsort -byvolume.''
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
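%%% A minimal LaTeX document using this bibliography might look like the
%%% following sketch; the ``plain'' bibliography style and the citation
%%% text are illustrative only, and any BibTeX style should work:
%%%
%%%     \documentclass{article}
%%%     \begin{document}
%%%     Mutation testing rests on the coupling effect
%%%     \cite{Offutt:1992:IST}.
%%%     \bibliographystyle{plain}
%%%     \bibliography{tosem}
%%%     \end{document}
%%%
%%% Run latex, then bibtex, then latex twice more to resolve the
%%% citation.  The last three values of the checksum field above are the
%%% output of the standard Unix command ``wc tosem.bib'' (lines, words,
%%% and characters).
%%% ====================================================================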
@Preamble{"\input bibnames.sty" #
    "\ifx \undefined \emph         \def \emph      #1{{{\em #1\/}}}     \fi"
}
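
%%% The preamble above \inputs bibnames.sty, which must therefore be on
%%% the TeX search path; it accompanies these bibliographies in the
%%% https://www.math.utah.edu/pub/tex/bib/ archive.  The \ifx test then
%%% defines a fallback \emph macro, so the file can also be used with
%%% formats, such as plain TeX, that do not already provide \emph.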

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|https://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-TOSEM                 = "ACM Transactions on Software Engineering and
                                   Methodology"}

%%% ====================================================================
%%% Bibliography entries:
@Article{Offutt:1992:IST,
  author =       "A. Jefferson Offutt",
  title =        "Investigations of the software testing coupling
                 effect",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "1",
  pages =        "5--20",
  month =        jan,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-1/p5-offutt/p5-offutt.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-1/p5-offutt/",
  abstract =     "Fault-based testing strategies test software by
                 focusing on specific, common types of faults. The {\em
                 coupling effect\/} hypothesizes that test data sets
                 that detect simple types of faults are sensitive enough
                 to detect more complex types of faults. This paper
                 describes empirical investigations into the coupling
                 effect over a specific class of software faults. All of
                 the results from this investigation support the
                 validity of the coupling effect. The major conclusion
                 from this investigation is that by explicitly
                 testing for simple faults, we are also implicitly
                 testing for more complicated faults, giving us
                 confidence that fault-based testing is an effective way
                 to test software.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "fault-based testing; mutation; software testing; unit
                 testing",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}; Software --- Software
                 Engineering --- Software/Program Verification (D.2.4):
                 {\bf Reliability}",
}

@Article{Olender:1992:ISA,
  author =       "Kurt M. Olender and Leon J. Osterweil",
  title =        "Interprocedural static analysis of sequencing
                 constraints",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "1",
  pages =        "21--52",
  month =        jan,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-1/p21-olender/p21-olender.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-1/p21-olender/",
  abstract =     "This paper describes a system that automatically
                 performs static interprocedural sequencing analysis
                 from programmable constraint specifications. We
                 describe the algorithms used for interprocedural
                 analysis, relate the problems arising from the analysis
                 of real-world programs, and show how these difficulties
                 were overcome. Finally, we sketch the architecture of
                 our prototype analysis system (called Cesar) and
                 describe our experiences to date with its use, citing
                 performance and error detection characteristics.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Languages; Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "error detection; interprocedural data flow analysis;
                 sequencing constraints",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Validation}; Software ---
                 Software Engineering --- Testing and Debugging (D.2.5):
                 {\bf Debugging aids}; Software --- Software Engineering
                 --- Requirements/Specifications (D.2.1): {\bf
                 Methodologies (e.g., object-oriented, structured)};
                 Software --- Programming Techniques --- Sequential
                 Programming (D.1.4); Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2): {\bf
                 Computer-aided software engineering (CASE)}; Software
                 --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Reliability}; Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1): {\bf Languages}",
}

@Article{Basili:1992:RAC,
  author =       "Victor R. Basili and Gianluigi Caldiera and Giovanni
                 Cantone",
  title =        "A reference architecture for the component factory",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "1",
  pages =        "53--80",
  month =        jan,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-1/p53-basili/p53-basili.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-1/p53-basili/",
  abstract =     "Software reuse can be achieved through an organization
                 that focuses on utilization of life cycle products from
                 previous developments. The component factory is both an
                 example of the more general concepts of experience and
                 domain factory and an organizational unit worth being
                 considered independently. The critical features of such
                 an organization are flexibility and continuous
                 improvement. In order to achieve these features we can
                 represent the architecture of the factory at different
                 levels of abstraction and define a reference
                 architecture from which specific architectures can be
                 derived by instantiation. A reference architecture is
                 an implementation and organization independent
                 representation of the component factory and its
                 environment. The paper outlines this reference
                 architecture, discusses the instantiation process, and
                 presents some examples of specific architectures by
                 comparing them in the framework of the reference
                 model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Economics; Management; Measurement;
                 Performance",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "component factory; experience factory; reference
                 architecture; reusability",
  subject =      "Software --- Software Engineering --- Management
                 (D.2.9); Software --- Software Engineering ---
                 Miscellaneous (D.2.m): {\bf Reusable software**};
                 Computing Milieux --- Management of Computing and
                 Information Systems --- Software Management (K.6.3):
                 {\bf Software development}; Software --- Software
                 Engineering --- Requirements/Specifications (D.2.1):
                 {\bf Methodologies (e.g., object-oriented,
                 structured)}; Software --- Software Engineering ---
                 Design Tools and Techniques (D.2.2): {\bf Software
                 libraries}",
}

@Article{Trammell:1992:APC,
  author =       "Carmen J. Trammell and Leon H. Binder and Cathrine E.
                 Snyder",
  title =        "The automated production control documentation system:
                 a case study in cleanroom software engineering",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "1",
  pages =        "81--94",
  month =        jan,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-1/p81-trammell/p81-trammell.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-1/p81-trammell/",
  abstract =     "A prototype software system was developed for the U.S.
                 Naval Underwater Systems Center (NUSC) as a
                 demonstration of the Cleanroom Software Engineering
                 methodology. The Cleanroom method is a team approach to
                 the incremental development of software under
                 statistical quality control. Cleanroom's formal methods
                 of Box Structure specification and design, functional
                 verification, and statistical testing were used by a
                 four-person team to develop the Automated Production
                 Control Documentation (APCODOC) system, a relational
                 database application. As is typical in Cleanroom
                 developments, correctness of design and code was
                 ensured through team reviews. Eighteen errors were
                 found during functional verification of the design, and
                 nineteen errors were found during walkthrough of the
                 1820 lines of FOXBASE code. The software was not
                 executed by developers prior to independent testing
                 (i.e., there was no debugging). There were no errors in
                 compilation, no failures during statistical
                 certification testing, and the software was certified
                 at the target levels of reliability and confidence.
                 Team members attribute the ultimate error-free
                 compilation and failure-free execution of the software
                 to the rigor of the methodology and the intellectual
                 control afforded by the team approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Management; Performance; Reliability;
                 Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "box structures; cleanroom software engineering;
                 statistical quality control; statistical testing",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Software/Program Verification
                 (D.2.4): {\bf Correctness proofs}; Software ---
                 Software Engineering --- Management (D.2.9): {\bf
                 Programming teams}; Computing Milieux --- Management of
                 Computing and Information Systems --- Software
                 Management (K.6.3): {\bf Software development};
                 Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Reliability}",
}

@Article{Ballance:1992:PLB,
  author =       "Robert A. Ballance and Susan L. Graham and Michael L.
                 Van de Vanter",
  title =        "The {Pan} language-based editing system",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "1",
  pages =        "95--127",
  month =        jan,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-1/p95-ballance/p95-ballance.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-1/p95-ballance/",
  abstract =     "Powerful editing systems for developing complex
                 software documents are difficult to engineer. Besides
                 requiring efficient incremental algorithms and complex
                 data structures, such editors must accommodate flexible
                 editing styles, provide a consistent, coherent, and
                 powerful user interface, support individual variations
                 and projectwide configurations, maintain a sharable
                 database of information concerning the documents being
                 edited, and integrate smoothly with the other tools in
                 the environment. {\em Pan\/} is a language-based
                 editing and browsing system that exhibits these
                 characteristics. This paper surveys the design and
                 engineering of {\em Pan}, paying particular attention
                 to a number of issues that pervade the system:
                 incremental checking and analysis, information
                 retention in the presence of change, tolerance for
                 errors and anomalies, and extension facilities.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Documentation; Human Factors; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "coherent user interfaces; colander; contextual
                 constraint; extension facilities; grammatical
                 abstraction; interactive programming environment;
                 Ladle; logic programming; logical constraint grammar;
                 Pan; reason maintenance; syntax-recognizing editor;
                 tolerance for errors and anomalies",
  subject =      "Software --- Software Engineering --- Coding Tools and
                 Techniques (D.2.3): {\bf Program editors}; Software ---
                 Software Engineering --- Programming Environments
                 (D.2.6); Software --- Software Engineering --- Design
                 Tools and Techniques (D.2.2): {\bf User interfaces};
                 Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7)",
}

@Article{Engels:1992:BIS,
  author =       "G. Engels and C. Lewerentz and M. Nagl and W.
                 Sch{\"a}fer and A. Sch{\"u}rr",
  title =        "Building integrated software development environments.
                 {Part} {I}: tool specification",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "2",
  pages =        "135--167",
  month =        apr,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-2/p135-engels/p135-engels.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-2/p135-engels/",
  abstract =     "The conceptual modeling approach of the IPSEN
                 (Integrated Project Support Environment) project for
                 building highly integrated environments is based on
                 using attributed graphs to model and implement
                 arbitrary object structures, in particular all kinds of
                 software documents and their relationships. A language
                 based on graph grammars, called PROGRESS (PROgrammed
                 Graph REwriting SyStems), and a suitable method for the
                 application of this language, called graph grammar
                 engineering, have been developed over the last ten
                 years. This language and method are being extensively
                 used for specifying the complex graph structures of
                 internal document representations as well as for
                 specifying the functionality of all tools (editors,
                 browsers, analyzers, debuggers) working on these
                 internal representations. This paper explains the
                 language and the method for applying the language based
                 on a pragmatic nontrivial example of a software
                 production process and its corresponding documents. In
                 particular, it is shown why and how a graph
                 grammar-based strongly typed language is perfectly
                 suitable to formally specify highly integrated software
                 tools. In addition, it is shown that the implementation
                 of these tools (i.e., an environment composed of these
                 tools) is systematically being derived from the formal
                 specifications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "attribute grammars; attributed graphs; environment
                 generators; graph grammars",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2); Software --- Software Engineering ---
                 Programming Environments (D.2.6); Software --- Software
                 Engineering --- Design** (D.2.10); Information Systems
                 --- Database Management --- Languages (H.2.3); Software
                 --- Programming Languages --- Language Classifications
                 (D.3.2)",
}

@Article{Kiper:1992:STR,
  author =       "James D. Kiper",
  title =        "Structural testing of rule-based expert systems",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "2",
  pages =        "168--187",
  month =        apr,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-2/p168-kiper/p168-kiper.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-2/p168-kiper/",
  abstract =     "Testing of rule-based expert systems has become a high
                 priority for many organizations as the use of such
                 systems proliferates. Traditional software testing
                 techniques apply to some components of rule-based
                 systems, e.g., the inference engine. However, to
                 structurally test the rule base component requires new
                 techniques or adaptations of existing ones. This paper
                 describes one such adaptation: an extension of data
                 flow path selection in which a graphical representation
                 of a rule base is defined and evaluated. This graphical
                 form, called a logical path graph, captures logical
                 paths through a rule base. These logical paths create
                 precisely the abstractions needed in the testing
                 process. An algorithm for the construction of logical
                 path graphs is analyzed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design; Experimentation; Measurement;
                 Performance; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "basis path testing; data flow path selection; expert
                 systems; rule bases; structured testing",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Computing Methodologies ---
                 Artificial Intelligence --- Programming Languages and
                 Software (I.2.5): {\bf Expert system tools and
                 techniques}",
}

@Article{Schneider:1992:ESF,
  author =       "G. Michael Schneider and Johnny Martin and W. T.
                 Tsai",
  title =        "An experimental study of fault detection in user
                 requirements documents",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "2",
  pages =        "188--204",
  month =        apr,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-2/p188-schneider/p188-schneider.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-2/p188-schneider/",
  abstract =     "This paper describes a software engineering experiment
                 designed to confirm results from an earlier project
                 which measured fault detection rates in {\em user
                 requirements documents\/} (URD). The experiment
                 described in this paper involves the creation of a
                 standardized URD with a known number of injected faults
                 of specific type. Nine independent inspection teams
                 were given this URD with instructions to locate as many
                 faults as possible using the N-fold requirements
                 inspection technique developed by the authors. Results
                 obtained from this experiment confirm earlier
                 conclusions about the low rate of fault detection in
                 requirements documents using formal inspections and the
                 advantages to be gained using the N-fold inspection
                 method. The experiment also provides new results
                 concerning variability in inspection team performance
                 and the relative difficulty of locating different
                 classes of URD faults.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Experimentation; Management; Performance;
                 Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "fault detection; inspections; user requirements",
  subject =      "Software --- Software Engineering --- Management
                 (D.2.9): {\bf Programming teams}; Computing Milieux ---
                 Management of Computing and Information Systems ---
                 Software Management (K.6.3): {\bf Software
                 development}; Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Tools};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}",
}

@Article{Ostertag:1992:CSR,
  author =       "Eduardo Ostertag and James Hendler and Rub{\'e}n
                 Prieto D{\'\i}az and Christine Braun",
  title =        "Computing similarity in a reuse library system: an
                 {AI-based} approach",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "3",
  pages =        "205--228",
  month =        jul,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-3/p205-ostertag/p205-ostertag.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-3/p205-ostertag/",
  abstract =     "This paper presents an AI-based library system for
                 software reuse, called AIRS, that allows a developer to
                 browse a software library in search of components that
                 best meet some stated requirement. A {\em component\/}
                 is described by a set of ({\em feature, term\/}) pairs.
                 A feature represents a classification criterion, and is
                 defined by a set of related terms. The system also
                 allows {\em packages\/} (logical units that group a
                 set of components) to be represented; these are
                 likewise described in terms of
                 features. Candidate reuse components and packages are
                 selected from the library based on the degree of
                 similarity between their descriptions and a given
                 target description. Similarity is quantified by a
                 nonnegative magnitude ({\em distance\/}) proportional
                 to the effort required to obtain the target given a
                 candidate. Distances are computed by {\em comparator\/}
                 functions based on the {\em subsumption, closeness},
                 and {\em package\/} relations. We present a
                 formalization of the concepts on which the AIRS system
                 is based. The functionality of a prototype
                 implementation of the AIRS system is illustrated by
                 application to two different software libraries: a set
                 of Ada packages for data structure manipulation, and a
                 set of C components for use in Command, Control, and
                 Information Systems. Finally, we discuss some of the
                 ideas we are currently exploring to automate the
                 construction of AIRS classification libraries.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "facet classification; similarity-based retrieval",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Software libraries}; Software
                 --- Software Engineering --- Metrics (D.2.8): {\bf
                 Complexity measures}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Retrieval models};
                 Computing Milieux --- Management of Computing and
                 Information Systems --- Software Management (K.6.3):
                 {\bf Software selection}; Software --- Software
                 Engineering --- Miscellaneous (D.2.m): {\bf Reusable
                 software**}",
}

@Article{Sullivan:1992:REI,
  author =       "Kevin J. Sullivan and David Notkin",
  title =        "Reconciling environment integration and software
                 evolution",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "3",
  pages =        "229--268",
  month =        jul,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-3/p229-sullivan/p229-sullivan.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-3/p229-sullivan/",
  abstract =     "Common software design approaches complicate both tool
                 integration and software evolution when applied in the
                 development of integrated environments. We illustrate
                 this by tracing the evolution of three different
                 designs for a simple integrated environment as
                 representative changes are made to the requirements. We
                 present an approach that eases integration and
                 evolution by preserving tool independence in the face
                 of integration. We design tool integration
                 relationships as separate components called {\em
                 mediators}, and we design tools to implicitly invoke
                 mediators that integrate them. Mediators separate tools
                 from each other, while implicit invocation allows tools
                 to remain independent of mediators. To enable the use
                 of our approach on a range of platforms, we provide a
                 formalized model and requirements for implicit
                 invocation mechanisms. We apply this model both to
                 analyze existing mechanisms and in the design of a
                 mechanism for C++.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "abstract behavior type; behavior abstraction;
                 component independence; environment integration; event
                 mechanism; implicit invocation; integrated environment;
                 mediator; mediator/event design; software evolution;
                 tool integration",
  subject =      "Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7): {\bf
                 Enhancement**}; Software --- Software Engineering ---
                 Design** (D.2.10): {\bf Methodologies**}; Computing
                 Milieux --- Management of Computing and Information
                 Systems --- Software Management (K.6.3): {\bf Software
                 development}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Extensibility**}; Computing Milieux --- Management
                 of Computing and Information Systems --- Software
                 Management (K.6.3): {\bf Software maintenance};
                 Software --- Software Engineering --- Programming
                 Environments (D.2.6); Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2); Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Assertions}; Software --- Software
                 Engineering --- Requirements/Specifications (D.2.1):
                 {\bf Methodologies (e.g., object-oriented,
                 structured)}",
}

@Article{Tyszberowicz:1992:OPL,
  author =       "Shmuel Tyszberowicz and Amiram Yehudai",
  title =        "{OBSERV} --- a prototyping language and environment",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "3",
  pages =        "269--309",
  month =        jul,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-3/p269-tyszberowicz/p269-tyszberowicz.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-3/p269-tyszberowicz/",
  abstract =     "The OBSERV methodology for software development is
                 based on rapid construction of an executable
                 specification, or prototype, of a system, which may be
                 examined and modified repeatedly to achieve the desired
                 functionality. The objectives of OBSERV also include
                 facilitating a smooth transition to a target system,
                 and providing means for reusing specification, design,
                 and code of systems and subsystems. We are particularly
                 interested in handling embedded systems, which are
                 likely to have concurrency and have some real-time
                 requirements. \par

                 The OBSERV prototyping language combines several
                 paradigms to express the behavior of a system. The
                 object-oriented approach provides the basic mechanism
                 for building a system from a collection of objects,
                 with well-defined interfaces between them. We use
                 finite-state machines to model the behavior of
                 individual objects. At a lower level, activities that
                 occur within objects, either upon entry to a state or
                 in transition between thus allowing a nonprocedural
                 description. \par

                 The environment provided to a prototype builder is as
                 important as the language. We have made an attempt to
                 provide flexible tools for executing or simulating the
                 prototype being built, as well as for browsing and
                 static checking. The first implementation of the tools
                 was window based but not graphic. A graphic front end,
                 named CRUISE, was developed afterwards. \par

                 A simulation sequence focuses on a single object, which
                 can be as complex as necessary, possibly the entire
                 system, and expects all the interactions between it and
                 the outside world to be achieved by communication
                 between the simulator and the user. The simulator
                 allows the user to easily switch back and forth from
                 one object to another, simulating each object in
                 isolation. \par

                 To enable testing the behavior of a prototype in a
                 realistic environment, it is possible to construct
                 objects that imitate the environment objects. We also
                 allow simulation of systems with missing pieces, by
                 calling upon the user to simulate any such missing
                 piece by himself.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Documentation; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "browsers; concurrency; CRUISE; embedded systems;
                 graphical user interface; interactive programming
                 environments; logic programming; modeling with finite
                 state machines; object-oriented approach; OBSERV; real
                 time systems; simulator; software reuse; static
                 checker",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2); Software --- Software Engineering ---
                 Miscellaneous (D.2.m): {\bf Rapid prototyping**};
                 Software --- Software Engineering --- Miscellaneous
                 (D.2.m): {\bf Reusable software**}; Software ---
                 Programming Languages --- Language Classifications
                 (D.3.2): {\bf OBSERV}",
}

@Article{Yang:1992:PIA,
  author =       "Wuu Yang and Susan Horwitz and Thomas Reps",
  title =        "A program integration algorithm that accommodates
                 semantics-preserving transformations",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "3",
  pages =        "310--354",
  month =        jul,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-3/p310-yang/p310-yang.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-3/p310-yang/",
  abstract =     "Given a program {\em Base\/} and two variants, {\em
                 A\/} and {\em B}, each created by modifying separate
                 copies of {\em Base}, the goal of program integration
                 is to determine whether the modifications interfere,
                 and if they do not, to create an integrated program
                 that includes both sets of changes as well as the
                 portions of {\em Base\/} preserved in both variants.
                 Text-based integration techniques, such as the one used
                 by the Unix {\em diff3\/} utility, are obviously
                 unsatisfactory because one has no guarantees about how
                 the execution behavior of the integrated program
                 relates to the behaviors of {\em Base}, {\em A}, and
                 {\em B}. The first program-integration algorithm to
                 provide such guarantees was developed by Horwitz et
                 al. [13]. However, a limitation of that algorithm is
                 that it incorporates no notion of semantics-preserving
                 transformations. This limitation causes the algorithm
                 to be overly conservative in its definition of
                 interference. For example, if one variant changes the
                 {\em way\/} a computation is performed (without
                 changing the values computed) while the other variant
                 adds code that uses the result of the computation, the
                 algorithm would classify those changes as interfering.
                 This paper describes a new integration algorithm that
                 is able to accommodate semantics-preserving
                 transformations.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "coarsest partition; control dependence; data
                 dependence; data-flow analysis; flow dependence;
                 program dependence graph; program integration; program
                 representation graph; static-single-assignment form",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Programmer workbench**};
                 Software --- Software Engineering --- Coding Tools and
                 Techniques (D.2.3): {\bf Program editors}; Software ---
                 Software Engineering --- Programming Environments
                 (D.2.6); Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Enhancement**}; Software --- Software Engineering
                 --- Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Restructuring, reverse engineering, and
                 reengineering}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Version control}; Software --- Software
                 Engineering --- Management (D.2.9): {\bf Software
                 configuration management}; Software --- Programming
                 Languages --- Processors (D.3.4): {\bf Optimization};
                 Software --- Programming Languages --- Processors
                 (D.3.4): {\bf Compilers}; Software --- Programming
                 Languages --- Processors (D.3.4): {\bf Interpreters}",
}

@Article{Batory:1992:DIH,
  author =       "Don Batory and Sean O'Malley",
  title =        "The design and implementation of hierarchical software
                 systems with reusable components",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "4",
  pages =        "355--398",
  month =        oct,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-4/p355-batory/p355-batory.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-4/p355-batory/",
  abstract =     "We present a domain-independent model of hierarchical
                 software system design and construction that is based
                 on interchangeable software components and large-scale
                 reuse. The model unifies the conceptualizations of two
                 independent projects, Genesis and Avoca, that are
                 successful examples of software
                 component/building-block technologies and domain
                 modeling. Building-block technologies exploit
                 large-scale reuse, rely on open architecture software,
                 and elevate the granularity of programming to the
                 subsystem level. Domain modeling formalizes the
                 similarities and differences among systems of a domain.
                 We believe our model is a blueprint for achieving
                 software component technologies in many domains.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Standardization",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "domain modeling; open system architectures; reuse;
                 software building-blocks; software design",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2): {\bf Protocol architecture}; Software ---
                 Programming Techniques --- Object-oriented Programming
                 (D.1.5); Software --- Software Engineering --- Design
                 Tools and Techniques (D.2.2): {\bf Modules and
                 interfaces}; Software --- Software Engineering ---
                 Design Tools and Techniques (D.2.2): {\bf Software
                 libraries}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Extensibility**}; Software --- Software
                 Engineering --- Design** (D.2.10): {\bf
                 Methodologies**}; Software --- Software Engineering ---
                 Design** (D.2.10): {\bf Representation**}; Software ---
                 Software Engineering --- Miscellaneous (D.2.m): {\bf
                 Rapid prototyping**}; Software --- Software Engineering
                 --- Miscellaneous (D.2.m): {\bf Reusable software**};
                 Computing Methodologies --- Simulation and Modeling ---
                 Model Development (I.6.5): {\bf Modeling
                 methodologies}",
}

@Article{Harel:1992:SO,
  author =       "David Harel and Chaim-arie Kahana",
  title =        "On statecharts with overlapping",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "4",
  pages =        "399--421",
  month =        oct,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-4/p399-harel/p399-harel.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-4/p399-harel/",
  abstract =     "The problem of extending the language of statecharts
                 to include overlapping states is considered. The need
                 for such an extension is motivated and the subtlety of
                 the problem is illustrated by exhibiting the
                 shortcomings of naive approaches. The syntax and formal
                 semantics of our extension are then presented, showing
                 in the process that the definitions for conventional
                 statecharts constitute a special case. Our definitions
                 are rather complex, a fact that we feel points to the
                 inherent difficulty of such an extension. We thus
                 prefer to leave open the question of whether or not it
                 should be adopted in practice.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "higraphs; reactive systems; statecharts; visual
                 language",
}

@Article{Zeil:1992:DLE,
  author =       "Steven J. Zeil and Faten H. Afifi and Lee J. White",
  title =        "Detection of linear errors via domain testing",
  journal =      j-TOSEM,
  volume =       "1",
  number =       "4",
  pages =        "422--451",
  month =        oct,
  year =         "1992",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1992-1-4/p422-zeil/p422-zeil.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1992-1-4/p422-zeil/",
  abstract =     "Domain testing attempts to find errors in the numeric
                 expressions affecting the flow of control through a
                 program. Intuitively, domain testing provides a
                 systematic form of boundary value testing for the
                 conditional statements within a program. Several forms
                 of domain testing have been proposed, all dealing with
                 the detection of linear errors in linear functions.
                 \par

                 Perturbation analysis has been previously developed as
                 a measure of the volume of faults, from within a
                 selected space of possible faults, left undetected by a
                 test set. It is adapted here to errors and error
                 spaces. The adapted form is used to show that the
                 different forms of domain testing are closer in error
                 detection ability than had been supposed. They may all
                 be considered effective for finding linear errors in
                 linear predicate functions. A simple extension is
                 proposed, which allows them to detect linear errors in
                 nonlinear predicate functions using only a single
                 additional test point.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "domain testing; perturbation testing",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Mathematics of Computing ---
                 Numerical Analysis --- Ordinary Differential Equations
                 (G.1.7): {\bf Boundary value problems}",
}

@Article{Broy:1993:FST,
  author =       "Manfred Broy",
  title =        "Functional specification of time-sensitive
                 communicating systems",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "1",
  pages =        "1--46",
  month =        jan,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-1/p1-broy/p1-broy.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-1/p1-broy/",
  abstract =     "A formal model and a logical framework for the
                 functional specification of time-sensitive
                 communicating systems and their interacting components
                 are outlined. The specification method is modular with
                 respect to sequential composition, parallel
                 composition, and communication feedback. Nondeterminism
                 is included by underspecification. The application of
                 the specification method to timed communicating
                 functions is demonstrated. Abstractions from time are
                 studied. In particular, a rationale is given for the
                 chosen concepts of the functional specification
                 technique. The relationship between system models based
                 on nondeterminism and system models based on explicit
                 time notions is investigated. Forms of reasoning are
                 considered. The alternating bit protocol is used as a
                 running example.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Experimentation; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "functional system models; real-time systems;
                 specification",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4); Software --- Operating Systems
                 --- Organization and Design (D.4.7); Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1); Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2)",
}

@Article{Kaiser:1993:PDI,
  author =       "Gail E. Kaiser and Simon M. Kaplan",
  title =        "Parallel and distributed incremental attribute
                 evaluation algorithms for multiuser software
                 development environments",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "1",
  pages =        "47--92",
  month =        jan,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-1/p47-kaiser/p47-kaiser.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-1/p47-kaiser/",
  abstract =     "The problem of {\em change propagation\/} in multiuser
                 software development environments distributed across a
                 local-area network is addressed. The program is modeled
                 as an attributed parse tree segmented among multiple
                 user processes and changes are modeled as subtree
                 replacements requested asynchronously by individual
                 users. Change propagation is then implemented using
                 decentralized incremental evaluation of an attribute
                 grammar that defines the static semantic properties of
                 the programming language. Building up to our primary
                 result, we first present algorithms that support
                 parallel evaluation on a centralized tree in response
                 to single edits using a single editing cursor and
                 multiple edits with multiple editing cursors. Then we
                 present our algorithm for parallel evaluation on a
                 decentralized tree. We also present a protocol to
                 guarantee reliability of the evaluation algorithm as
                 components of the decentralized tree become unavailable
                 due to failures and return to availability.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design; Languages; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "attribute grammar; change propagation; distributed;
                 incremental algorithm; parallel; reliability",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6): {\bf Interactive environments};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Modules and interfaces};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Computer-aided software
                 engineering (CASE)}; Software --- Software Engineering
                 --- Coding Tools and Techniques (D.2.3): {\bf Program
                 editors}; Software --- Programming Languages --- Formal
                 Definitions and Theory (D.3.1): {\bf Semantics};
                 Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3)",
}
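
%%% The change propagation scheme above generalizes incremental attribute
%%% evaluation to parallel and decentralized trees. A minimal sequential
%%% sketch of the underlying idea (one synthesized attribute, upward
%%% propagation that stops when a value is unchanged; the paper's
%%% multi-cursor and distributed algorithms go well beyond this):
%%%
%%%     class Node:
%%%         def __init__(self, kids=(), leaf_val=0):
%%%             self.kids, self.leaf_val = list(kids), leaf_val
%%%             self.parent, self.attr = None, None
%%%             for k in self.kids:
%%%                 k.parent = self
%%%
%%%     def evaluate(n):            # synthesized attribute: sum of leaves
%%%         n.attr = n.leaf_val + sum(evaluate(k) for k in n.kids)
%%%         return n.attr
%%%
%%%     def replace_subtree(old, new):
%%%         p = old.parent
%%%         p.kids[p.kids.index(old)] = new
%%%         new.parent = p
%%%         evaluate(new)
%%%         while p is not None:    # stop once an attribute is unchanged
%%%             fresh = p.leaf_val + sum(k.attr for k in p.kids)
%%%             if fresh == p.attr:
%%%                 break
%%%             p.attr, p = fresh, p.parent
%%%
%%%     root = Node([Node(leaf_val=1), Node([Node(leaf_val=2)])])
%%%     evaluate(root)
%%%     replace_subtree(root.kids[1].kids[0], Node(leaf_val=7))
%%%     print(root.attr)            # 8: only the changed path was revisited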

@Article{Whittaker:1993:MAS,
  author =       "James A. Whittaker and J. H. Poore",
  title =        "{Markov} analysis of software specifications",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "1",
  pages =        "93--106",
  month =        jan,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-1/p93-whittaker/p93-whittaker.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-1/p93-whittaker/",
  abstract =     "A procedure for modeling software usage with the
                 finite state, discrete parameter Markov chain is
                 described. It involves rigorous analysis of the
                 specification before design and coding begin. Many
                 benefits emerge from this process, including the
                 ability to synthesize a macro level usage distribution
                 from a micro level understanding of how the software
                 will be used. This usage distribution becomes the basis
                 for a statistical test of the software, which is
                 fundamental to the Cleanroom development process. Some
                 analytical results known for Markov chains that have
                 meaningful implications and interpretations for the
                 software development process are described.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Management; Measurement; Reliability;
                 Standardization; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "box structure method; certification; Cleanroom; Markov
                 chain; software specification; statistical test;
                 stochastic process; usage distribution",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Mathematics of
                 Computing --- Probability and Statistics (G.3);
                 Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}; Software --- Software
                 Engineering --- Metrics (D.2.8): {\bf Complexity
                 measures}; Software --- Software Engineering ---
                 Management (D.2.9): {\bf Software quality assurance
                 (SQA)}; Computing Methodologies --- Simulation and
                 Modeling --- Model Development (I.6.5): {\bf Modeling
                 methodologies}",
}
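
%%% A usage model in the sense above is a finite-state Markov chain whose
%%% transition probabilities summarize expected use; test sequences are
%%% then drawn statistically from it. A toy sketch (states and
%%% probabilities invented, not taken from the paper):
%%%
%%%     import random
%%%
%%%     usage = {                    # state -> [(next state, probability)]
%%%         "idle":  [("open", 0.7), ("idle", 0.3)],
%%%         "open":  [("close", 0.9), ("open", 0.1)],
%%%         "close": [("idle", 1.0)],
%%%     }
%%%
%%%     def sample_run(start="idle", steps=6, seed=0):
%%%         rng, state = random.Random(seed), start
%%%         run = [state]
%%%         for _ in range(steps):
%%%             nexts, weights = zip(*usage[state])
%%%             state = rng.choices(nexts, weights=weights)[0]
%%%             run.append(state)
%%%         return run
%%%
%%%     print(sample_run())         # one statistically generated usage path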

@Article{DeMillo:1993:ERA,
  author =       "Richard A. DeMillo and A. Jefferson Offutt",
  title =        "Experimental results from an automatic test case
                 generator",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "2",
  pages =        "109--127",
  month =        apr,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-2/p109-demillo/p109-demillo.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-2/p109-demillo/",
  abstract =     "Constraint-based testing is a novel way of generating
                 test data to detect specific types of common
                 programming faults. The conditions under which faults
                 will be detected are encoded as mathematical systems of
                 constraints in terms of program symbols. A set of
                 tools, collectively called Godzilla, has been
                 implemented that automatically generates constraint
                 systems and solves them to create test cases for use by
                 the Mothra testing system. Experimental results from
                 using Godzilla show that the technique can produce test
                 data that is very close in terms of mutation adequacy
                 to test data that is produced manually, and at
                 substantially reduced cost. Additionally, these
                 experiments have suggested a new procedure for unit
                 testing, where test cases are viewed as throw-away
                 items rather than scarce resources.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Measurement; Performance;
                 Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "adequacy; constraints; mutation analysis",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}",
}
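
%%% Constraint-based testing, as described above, encodes the condition
%%% under which a fault is revealed and then solves for inputs. A
%%% miniature sketch for one mutation ("<" replaced by "<="): the
%%% necessity constraint is that the two predicates differ, i.e. x == y
%%% (brute-force search stands in for Godzilla's constraint solver):
%%%
%%%     def original(x, y):
%%%         return x < y
%%%
%%%     def mutant(x, y):
%%%         return x <= y
%%%
%%%     def generate(domain=range(-5, 6)):
%%%         for x in domain:
%%%             for y in domain:
%%%                 if original(x, y) != mutant(x, y):   # constraint met
%%%                     return (x, y)
%%%
%%%     print(generate())   # (-5, -5): only x == y distinguishes the mutant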

@Article{Feldman:1993:SRS,
  author =       "Yishai A. Feldman and Haim Schneider",
  title =        "Simulating reactive systems by deduction",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "2",
  pages =        "128--175",
  month =        apr,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-2/p128-feldman/p128-feldman.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-2/p128-feldman/",
  abstract =     "Debugging is one of the main uses of simulation.
                 Localizing bugs or finding the reasons for unclear
                 behavior involves going backwards in time, whereas
                 simulation goes forward in time. Therefore, identifying
                 causes with the aid of most existing simulation tools
                 usually requires repeating the simulation several
                 times, each time with reduced holes in the sieve. An
                 alternative is simulation by deduction, a technique in
                 which the steps in the dynamic behavior of the
                 simulated model are deduced by a reasoning system. A
                 simulation system that uses simulation by deduction can
                 give direct answers to questions about the reasons for
                 the simulation results. By recording the support for
                 its deductions, such a system can answer ``why'' and
                 ``why not'' questions about the scenario. \par

                 Another benefit of simulation by deduction is that it
                 enables symbolic simulation, that is, simulating a
                 scenario given only a partial description of the
                 environment and the simulated model. This allows
                 verifying properties of an evolving design at any stage
                 of the design process, and thus checking the
                 consequences of the design decisions made so far. In
                 order to allow deducing as much as possible from
                 partial information, the axiom system has to be
                 minimalistic, i.e., axioms have to require the minimum
                 amount of knowledge of simulation inputs. \par

                 These ideas were implemented in a system called SIP,
                 which simulates the behavior of reactive systems. SIP
                 is capable of answering ``why,'' ``why not,'' and
                 ``what if'' questions. It also has a limited capability
                 of dealing with partial knowledge. SIP is based on a
                 reasoning system that is responsible for deducing the
                 effects of the external inputs on the state of the
                 simulated model, and recording the support for its
                 deductions. The logical basis for the deduction of a
                 step in SIP is provided by a minimalistic axiom system
                 for statecharts. \par

                 Although SIP simulates reactive systems described as
                 statecharts, the principle of simulation by deduction
                 is applicable to other types of systems and
                 descriptions, provided only that they have a
                 well-defined formal semantics.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Deduction and Theorem Proving (I.2.3): {\bf
                 Deduction}; Computing Methodologies --- Simulation and
                 Modeling --- Simulation Support Systems (I.6.7): {\bf
                 Environments}; Computer Applications --- Computers in
                 Other Systems (J.7)",
}
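
%%% The ability to answer ``why'' questions above comes from recording
%%% the support of every deduction. A toy forward-chaining sketch that
%%% stores each conclusion's premises and replays them on demand (rules
%%% and facts invented; SIP's statechart axioms are not modeled):
%%%
%%%     rules = [({"button_pressed", "door_closed"}, "motor_on"),
%%%              ({"motor_on"}, "door_opening")]
%%%     facts, support = {"button_pressed", "door_closed"}, {}
%%%
%%%     changed = True
%%%     while changed:               # naive chaining to a fixpoint
%%%         changed = False
%%%         for premises, conclusion in rules:
%%%             if premises <= facts and conclusion not in facts:
%%%                 facts.add(conclusion)
%%%                 support[conclusion] = premises
%%%                 changed = True
%%%
%%%     def why(fact, depth=0):      # replay the recorded support
%%%         print("  " * depth + fact)
%%%         for p in support.get(fact, ()):
%%%             why(p, depth + 1)
%%%
%%%     why("door_opening")          # prints the chain of justifications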

@Article{Klint:1993:MEG,
  author =       "P. Klint",
  title =        "A meta-environment for generating programming
                 environments",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "2",
  pages =        "176--201",
  month =        apr,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-2/p176-klint/p176-klint.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-2/p176-klint/",
  abstract =     "Over the last decade, considerable progress has been
                 made in solving the problems of automatic generation of
                 programming/development environments, given a formal
                 definition of some programming or specification
                 language. In most cases, research has focused on the
                 functionality and efficiency of the generated
                 environments, and, of course, these aspects will
                 ultimately determine the acceptance of environment
                 generators. However, only marginal attention has been
                 paid to the development process of formal language
                 definitions itself. Assuming that the quality of
                 automatically generated environments will be
                 satisfactory within a few years, the development costs
                 of formal language definitions will then become the
                 next limiting factor determining ultimate success and
                 acceptance of environment generators. \par

                 In this paper we describe the design and implementation
                 of a meta-environment (a development environment for
                 formal language definitions) based on the formalism
                 ASF+SDF. This meta-environment is currently being
                 implemented as part of the Centaur system and is, at
                 least partly, obtained by applying environment
                 generation techniques to the language definition
                 formalism itself. A central problem is providing fully
                 interactive editing of modular language definitions
                 such that modifications made to the language definition
                 during editing can be translated immediately to
                 modifications in the programming environment generated
                 from the original language definition. Therefore, some
                 of the issues addressed are the treatment of formalisms
                 with user-definable syntax and incremental program
                 generation techniques.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Computing Methodologies ---
                 Artificial Intelligence --- Automatic Programming
                 (I.2.2); Software --- Programming Languages --- Formal
                 Definitions and Theory (D.3.1)",
}

@Article{Ciancarini:1993:CRB,
  author =       "Paolo Ciancarini",
  title =        "Coordinating rule-based software processes with
                 {ESP}",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "3",
  pages =        "203--227",
  month =        jul,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-3/p203-ciancarini/p203-ciancarini.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-3/p203-ciancarini/",
  abstract =     "ESP is a language for modeling rule-based software
                 processes that take place in a distributed software
                 development environment. It is based on PoliS, an
                 abstract coordination model that relies on Multiple
                 Tuple Spaces, i.e., collections of tuples {\`a} la Linda.
                 PoliS extends Linda aiming at the specification and
                 coordination of logically distributed systems. ESP
                 (Extended Shared Prolog) combines the PoliS mechanisms
                 to deal with concurrency and distribution, with the
                 logic-programming language Prolog, to deal with rules
                 and deduction. Such a combination of a coordination
                 model and a logic language provides a powerful
                 framework in which experiments about rule-based
                 software process programming can be performed and
                 evaluated.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "concurrency; logic programming; multiuser programming
                 environment; rule-based programming; software process;
                 software process modeling",
  subject =      "Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3); Software --- Software Engineering
                 --- Programming Environments (D.2.6); Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3): {\bf Deduction}; Software
                 --- Programming Languages --- Language Classifications
                 (D.3.2): {\bf Concurrent, distributed, and parallel
                 languages}; Computing Methodologies --- Artificial
                 Intelligence --- Deduction and Theorem Proving (I.2.3):
                 {\bf Logic programming}; Computing Milieux ---
                 Management of Computing and Information Systems ---
                 Software Management (K.6.3): {\bf Software
                 development}; Computing Methodologies --- Simulation
                 and Modeling --- Applications (I.6.3); Software ---
                 Software Engineering --- Design** (D.2.10): {\bf
                 Methodologies**}; Software --- Software Engineering ---
                 Management (D.2.9)",
}
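
%%% PoliS, mentioned above, builds on Linda-style tuple spaces. A toy
%%% single-threaded tuple space with the three core operations (real
%%% Linda in/rd block until a match appears; blocking is elided here,
%%% and all names are invented):
%%%
%%%     ANY = object()                       # wildcard for templates
%%%
%%%     class TupleSpace:
%%%         def __init__(self):
%%%             self.tuples = []
%%%
%%%         def out(self, tup):              # deposit a tuple
%%%             self.tuples.append(tup)
%%%
%%%         def _match(self, tmpl, tup):
%%%             return len(tmpl) == len(tup) and all(
%%%                 t is ANY or t == v for t, v in zip(tmpl, tup))
%%%
%%%         def rd(self, tmpl):              # read without removing
%%%             return next(t for t in self.tuples if self._match(tmpl, t))
%%%
%%%         def in_(self, tmpl):             # read and remove
%%%             t = self.rd(tmpl)
%%%             self.tuples.remove(t)
%%%             return t
%%%
%%%     ts = TupleSpace()
%%%     ts.out(("task", "compile", "parser.c"))
%%%     print(ts.in_(("task", ANY, ANY)))    # a worker claims the task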

@Article{Griswold:1993:AAP,
  author =       "William G. Griswold and David Notkin",
  title =        "Automated assistance for program restructuring",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "3",
  pages =        "228--269",
  month =        jul,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-3/p228-griswold/p228-griswold.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-3/p228-griswold/",
  abstract =     "Maintenance tends to degrade the structure of
                 software, ultimately making maintenance more costly. At
                 times, then, it is worthwhile to manipulate the
                 structure of a system to make changes easier. However,
                 manual restructuring is an error-prone and expensive
                 activity. By separating structural manipulations from
                 other maintenance activities, the semantics of a system
                 can be held constant by a tool, assuring that no errors
                 are introduced by restructuring. To allow the
                 maintenance team to focus on the aspects of
                 restructuring and maintenance requiring human judgment,
                 a transformation-based tool can be provided--based on a
                 model that exploits the preservation of data flow
                 dependence and control flow dependence--to automate the repetitive,
                 error-prone, and computationally demanding aspects of
                 restructuring. A set of automatable transformations is
                 introduced; their impact on structure is described, and
                 their usefulness is demonstrated in examples. A model
                 to aid building meaning-preserving restructuring
                 transformations is described, and its realization in a
                 functioning prototype tool for restructuring Scheme
                 programs is discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Management",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "CASE; flow analysis; meaning-preserving
                 transformations; software engineering; software
                 evolution; software maintenance; software
                 restructuring; source-level restructuring",
  subject =      "Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7): {\bf
                 Restructuring, reverse engineering, and reengineering};
                 Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7): {\bf
                 Corrections**}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Enhancement**}; Software --- Software Engineering
                 --- Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Extensibility**}; Software --- Software
                 Engineering --- Design** (D.2.10); Computing Milieux
                 --- Management of Computing and Information Systems ---
                 Software Management (K.6.3): {\bf Software
                 maintenance}; Software --- Software Engineering ---
                 Design Tools and Techniques (D.2.2): {\bf
                 Computer-aided software engineering (CASE)}; Computing
                 Methodologies --- Artificial Intelligence --- Automatic
                 Programming (I.2.2): {\bf Program transformation}",
}

@Article{Harrold:1993:MCS,
  author =       "M. Jean Harrold and Rajiv Gupta and Mary Lou Soffa",
  title =        "A methodology for controlling the size of a test
                 suite",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "3",
  pages =        "270--285",
  month =        jul,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-3/p270-harrold/p270-harrold.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-3/p270-harrold/",
  abstract =     "This paper presents a technique to select a
                 representative set of test cases from a test suite that
                 provides the same coverage as the entire test suite.
                 This selection is performed by identifying, and then
                 eliminating, the redundant and obsolete test cases in
                 the test suite. The representative set replaces the
                 original test suite and thus potentially produces a
                 smaller test suite. The representative set can also be
                 used to identify those test cases that should be rerun
                 to test the program after it has been changed. Our
                 technique is independent of the testing methodology and
                 only requires an association between a testing
                 requirement and the test cases that satisfy the
                 requirement. We illustrate the technique using the data
                 flow testing methodology. The reduction that is
                 possible with our technique is illustrated by
                 experimental results.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Experimentation; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "hitting set; regression testing; software engineering;
                 software maintenance; test suite reduction",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Software --- Software Engineering
                 --- Design** (D.2.10): {\bf Methodologies**}; Software
                 --- Software Engineering --- Distribution, Maintenance,
                 and Enhancement (D.2.7): {\bf Version control}",
}
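
%%% The representative-set computation above is an instance of minimum
%%% hitting set, which is NP-hard, so practical tools approximate it. A
%%% greedy sketch (the paper's heuristic is more refined; the
%%% requirement-to-test mapping below is invented):
%%%
%%%     requirements = {             # requirement -> tests satisfying it
%%%         "r1": {"t1", "t2"},
%%%         "r2": {"t2", "t3"},
%%%         "r3": {"t3"},
%%%         "r4": {"t1", "t3"},
%%%     }
%%%
%%%     def reduce_suite(reqs):
%%%         unmet, chosen = set(reqs), set()
%%%         while unmet:             # pick the test meeting most unmet reqs
%%%             best = max({t for r in unmet for t in reqs[r]},
%%%                        key=lambda t: sum(t in reqs[r] for r in unmet))
%%%             chosen.add(best)
%%%             unmet = {r for r in unmet if best not in reqs[r]}
%%%         return chosen
%%%
%%%     print(reduce_suite(requirements))   # e.g. {'t3', 't1'}: full coverage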

@Article{Podgurski:1993:RRS,
  author =       "Andy Podgurski and Lynn Pierce",
  title =        "Retrieving reusable software by sampling behavior",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "3",
  pages =        "286--303",
  month =        jul,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-3/p286-podgurski/p286-podgurski.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-3/p286-podgurski/",
  abstract =     "A new method, called {\em behavior sampling}, is
                 proposed for automated retrieval of reusable components
                 from software libraries. Behavior sampling exploits the
                 property of software that distinguishes it from other
                 forms of text: executability. Basic behavior sampling
                 identifies relevant routines by executing candidates on
                 a searcher-supplied sample of operational inputs and by
                 comparing their output to output provided by the
                 searcher. The probabilistic basis for behavior sampling
                 is described, and experimental results are reported
                 that suggest that basic behavior sampling exhibits high
                 precision when used with small samples. Extensions to
                 basic behavior sampling are proposed to improve its
                 recall and to make it applicable to the retrieval of
                 abstract data types and object classes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design; Experimentation",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "behavior sampling; software libraries; software
                 retrieval; software reuse",
  subject =      "Software --- Software Engineering --- Miscellaneous
                 (D.2.m): {\bf Reusable software**}; Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2): {\bf Software libraries}; Software ---
                 Software Engineering --- Distribution, Maintenance, and
                 Enhancement (D.2.7); Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3)",
}
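
%%% Behavior sampling, as above, retrieves components by running them. A
%%% toy sketch: execute each library candidate on a searcher-supplied
%%% sample and keep those whose outputs match (library and sample
%%% invented):
%%%
%%%     library = {
%%%         "sort_asc":  sorted,
%%%         "sort_desc": lambda xs: sorted(xs, reverse=True),
%%%         "dedupe":    lambda xs: list(dict.fromkeys(xs)),
%%%     }
%%%
%%%     sample = [([3, 1, 2], [1, 2, 3]),    # (input, desired output)
%%%               ([2, 2, 1], [1, 2, 2])]
%%%
%%%     matches = [name for name, f in library.items()
%%%                if all(f(list(x)) == y for x, y in sample)]
%%%     print(matches)                       # ['sort_asc']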

@Article{Dillon:1993:VEM,
  author =       "Laura K. Dillon",
  title =        "A visual execution model for {Ada} tasking",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "4",
  pages =        "311--345",
  month =        oct,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-4/p311-dillon/p311-dillon.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-4/p311-dillon/",
  abstract =     "A visual execution model for Ada tasking can help
                 programmers attain a deeper understanding of the
                 tasking semantics. It can illustrate subtleties in
                 semantic definitions that are not apparent in natural
                 language descriptions. We describe a contour model of Ada
                 tasking that depicts asynchronous tasks (threads of
                 control), relationships between the environments in
                 which tasks execute, and the manner in which tasks
                 interact. The use of this high-level execution model
                 makes it possible to see what happens during execution
                 of a program. The paper provides an introduction to the
                 contour model of Ada tasking and demonstrates its
                 use.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "contour model; visual execution model",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2); Software --- Software Engineering
                 --- Programming Environments (D.2.6); Software ---
                 Programming Languages --- Formal Definitions and Theory
                 (D.3.1): {\bf Semantics}; Software --- Programming
                 Languages --- Language Classifications (D.3.2): {\bf
                 Ada}; Software --- Programming Languages --- Language
                 Constructs and Features (D.3.3): {\bf Concurrent
                 programming structures}; Software --- Programming
                 Techniques --- Concurrent Programming (D.1.3); Theory
                 of Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2): {\bf
                 Operational semantics}; Software --- Programming
                 Languages --- Processors (D.3.4): {\bf Interpreters}",
}

@Article{Wang:1993:DRT,
  author =       "Farn Wang and Aloysius K. Mok and E. Allen Emerson",
  title =        "Distributed real-time system specification and
                 verification in {APTL}",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "4",
  pages =        "346--378",
  month =        oct,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-4/p346-wang/p346-wang.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-4/p346-wang/",
  abstract =     "In this article, we propose a language, Asynchronous
                 Propositional Temporal Logic (APTL), for the
                 specification and verification of distributed hard
                 real-time systems. APTL extends the logic TPTL by
                 dealing explicitly with multiple local clocks. We
                 propose a distributed-system model which permits
                 definition of inequalities asserting the temporal
                 precedence of local clock readings. We show the
                 expressiveness of APTL through two nontrivial examples.
                 Our logic can be used to specify and reason about such
                 important properties as bounded clock rate drifting. We
                 then give a $2^{2^{O(n)}}$ tableau-based decision procedure
                 for determining APTL satisfiability, where {\em n\/} is
                 the size (number of bits) of the input formula.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "asynchronous; bounded clock rate drifting; multiclock
                 system model; propositional temporal logic; real-time
                 systems; specification; verification",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Operating Systems --- Organization and
                 Design (D.4.7): {\bf Real-time systems and embedded
                 systems}; Software --- Programming Languages --- Formal
                 Definitions and Theory (D.3.1): {\bf Semantics}",
}

@Article{Zave:1993:CC,
  author =       "Pamela Zave and Michael Jackson",
  title =        "Conjunction as composition",
  journal =      j-TOSEM,
  volume =       "2",
  number =       "4",
  pages =        "379--411",
  month =        oct,
  year =         "1993",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1993-2-4/p379-zave/p379-zave.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1993-2-4/p379-zave/",
  abstract =     "Partial specifications written in many different
                 specification languages can be composed if they are all
                 given semantics in the same domain, or alternatively,
                 all translated into a common style of predicate logic.
                 The common semantic domain must be very general, the
                 particular semantics assigned to each specification
                 language must be conducive to composition, and there
                 must be some means of communication that enables
                 specifications to build on one another. The criteria
                 for success are that a wide variety of specification
                 languages should be accommodated, there should be no
                 restrictions on where boundaries between languages can
                 be placed, and intuitive expectations of the specifier
                 should be met.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "compositional specification; multiparadigm
                 specification; practical specification",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}",
}

@Article{Adams:1994:CSR,
  author =       "Rolf Adams and Walter Tichy and Annette Weinert",
  title =        "The cost of selective recompilation and environment
                 processing",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "1",
  pages =        "3--28",
  month =        jan,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See comment \cite{Brett:1995:CCS,Tichy:1995:AR}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-1/p3-adams/p3-adams.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-1/p3-adams/",
  abstract =     "When a single software module in a large system is
                 modified, a potentially large number of other modules
                 may have to be recompiled. By reducing both the number
                 of compilations and the amount of input processed by
                 each compilation run, the turnaround time after changes
                 can be reduced significantly. \par

                 Potential time savings are measured in a medium-sized,
                 industrial software project over a three-year period.
                 The results indicate that a large number of
                 compilations caused by traditional compilation unit
                 dependencies may be redundant. On the available data, a
                 mechanism that compares compiler output saves about 25
                 percent, smart recompilation saves 50 percent, and
                 smartest recompilation may save up to 80 percent of
                 compilation work. \par

                 Furthermore, all compilation methods other than
                 smartest recompilation process large amounts of unused
                 environment data. In the project analyzed, on average
                 only a fraction of the environment symbols are actually
                 used. Reading only the
                 actually used symbols would reduce total compiler input
                 by about 50 percent. \par

                 Combining smart recompilation with a reduction in
                 environment processing might double to triple perceived
                 compilation speed and double linker speed, without
                 sacrificing static type safety.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Measurement",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "empirical analysis; environment processing; selective
                 recompilation; separate compilation; smart
                 recompilation; software evolution",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Software --- Software Engineering
                 --- Management (D.2.9): {\bf Software configuration
                 management}; Software --- Programming Languages ---
                 Processors (D.3.4); Software --- Programming Languages
                 --- Processors (D.3.4): {\bf Compilers}",
}
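
%%% The mechanism that ``compares compiler output'' above amounts to
%%% fingerprinting the interface a dependent imports and recompiling only
%%% on a fingerprint change. A minimal sketch (interfaces invented; the
%%% paper's smart/smartest variants track finer-grained symbol usage):
%%%
%%%     import hashlib
%%%
%%%     def fingerprint(interface_text):
%%%         return hashlib.sha256(interface_text.encode()).hexdigest()
%%%
%%%     old = "int open(const char *p); int close(int fd);"
%%%     new = "int open(const char *p); int close(int fd);"  # body-only edit
%%%
%%%     if fingerprint(new) != fingerprint(old):
%%%         print("interface changed: recompile dependents")
%%%     else:
%%%         print("interface unchanged: skip dependent recompilations")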

@Article{Forgacs:1994:DIF,
  author =       "Istv{\'a}n Forg{\'a}cs",
  title =        "Double iterative framework for flow-sensitive
                 interprocedural data flow analysis",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "1",
  pages =        "29--55",
  month =        jan,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-1/p29-forgacs/p29-forgacs.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-1/p29-forgacs/",
  abstract =     "Compiler optimization, parallel processing, data flow
                 testing, and symbolic debugging can benefit from
                 interprocedural data flow analysis. However, the live,
                 reaching definition, and most summary data flow
                 problems are theoretically intractable in the
                 interprocedural case. A method is presented that
                 reduces the exponential time bound with the help of an
                 algorithm that solves the problem in polynomial time.
                 Either the resulting sets contain precise results or
                 the missing (or additional) results do not cause any
                 problems during their use. We also introduce the double
                 iterative framework, where one procedure is processed
                 at a time. The results of the intraprocedural analysis
                 of procedures then propagate along the edges of the
                 call multi-graph. In this way the intra- and
                 interprocedural analyses are executed alternately until
                 there is no change in any result set. This method can
                 be applied to any known interprocedural data flow
                 problem. Here the algorithms for the kill, live
                 variables, and reaching definitions problems are
                 presented. Besides precision, the algorithms can be
                 used for very large programs, and since inter- and
                 intraprocedural analyses can be optimized separately,
                 the method is fast as well.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Performance",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "data flow analysis; double iterative frameworks",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Data --- Files (E.5); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2); Software --- Programming Languages ---
                 Processors (D.3.4): {\bf Optimization}",
}
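
%%% The double iterative framework above alternates passes of the classic
%%% iterative data-flow style. For reference, a standard worklist solver
%%% for intraprocedural reaching definitions on a tiny CFG (the
%%% interprocedural alternation itself is not reproduced; CFG invented):
%%%
%%%     cfg   = {"entry": ["b1"], "b1": ["b2", "b3"],
%%%              "b2": ["b3"], "b3": []}
%%%     preds = {"entry": [], "b1": ["entry"],
%%%              "b2": ["b1"], "b3": ["b1", "b2"]}
%%%     gen   = {"entry": set(), "b1": {"d1"}, "b2": {"d2"}, "b3": set()}
%%%     kill  = {"entry": set(), "b1": {"d2"}, "b2": {"d1"}, "b3": set()}
%%%
%%%     inn = {n: set() for n in cfg}
%%%     out = {n: set() for n in cfg}
%%%     worklist = list(cfg)
%%%     while worklist:                      # iterate to a fixpoint
%%%         n = worklist.pop()
%%%         inn[n] = set().union(*[out[p] for p in preds[n]] or [set()])
%%%         new_out = gen[n] | (inn[n] - kill[n])
%%%         if new_out != out[n]:
%%%             out[n] = new_out
%%%             worklist.extend(cfg[n])
%%%
%%%     print(inn["b3"])                     # {'d1', 'd2'} reach b3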

@Article{Morzenti:1994:OOL,
  author =       "Angelo Morzenti and Pierluigi {San Pietro}",
  title =        "Object-oriented logical specification of time-critical
                 systems",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "1",
  pages =        "56--98",
  month =        jan,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-1/p56-morzenti/p56-morzenti.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-1/p56-morzenti/",
  abstract =     "We define TRIO+, an object-oriented logical language
                 for modular system specification. TRIO+ is based on
                 TRIO, a first-order temporal language that is well
                 suited to the specification of embedded and real-time
                 systems, and that provides an effective support to a
                 variety of validation activities, like specification
                 testing, simulation, and property proof. Unfortunately,
                 TRIO lacks the ability to construct specifications of
                 complex systems in a systematic and modular way. TRIO+
                 combines the use of constructs for hierarchical system
                 decomposition and object-oriented concepts like
                 inheritance and genericity with an expressive and
                 intuitive graphic notation, yielding a specification
                 language that is formal and rigorous, yet still
                 flexible, readable, general, and easily adaptable to
                 the user's needs. After introducing and motivating the
                 main features of the language, we illustrate its
                 application to a nontrivial case study extracted from a
                 real-life industrial application.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Measurement; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "first-order logic; formal specifications;
                 model-theoretic semantics; object-oriented
                 methodologies; real-time systems; temporal logic",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1): {\bf Tools}; Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2): {\bf Modules
                 and interfaces}; Software --- Software Engineering ---
                 Design Tools and Techniques (D.2.2): {\bf Software
                 libraries}; Software --- Operating Systems ---
                 Organization and Design (D.4.7): {\bf Real-time systems
                 and embedded systems}; Software --- Software
                 Engineering --- Requirements/Specifications (D.2.1):
                 {\bf TRIO+}",
}
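
%%% TRIO formulas, on which TRIO+ classes are built, are first-order
%%% temporal assertions. A typical bounded-response property in TRIO-like
%%% notation (operator names as commonly presented for TRIO; the exact
%%% syntax may differ from the paper):
%%%
%%%     \mathit{Alw}\bigl(\mathit{request} \rightarrow
%%%       \exists d\,(0 < d \le D \wedge
%%%                   \mathit{Futr}(\mathit{response}, d))\bigr)
%%%
%%% read: whenever a request occurs, a response follows within D time
%%% units, where Futr(A, d) asserts that A holds d time units ahead.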

@Article{Doong:1994:AAT,
  author =       "Roong-Ko Doong and Phyllis G. Frankl",
  title =        "The {ASTOOT} approach to testing object-oriented
                 programs",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "2",
  pages =        "101--130",
  month =        apr,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-2/p101-doong/p101-doong.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-2/p101-doong/",
  abstract =     "This article describes a new approach to the unit
                 testing of object-oriented programs, a set of tools
                 based on this approach, and two case studies. In this
                 approach, each test case consists of a tuple of
                 sequences of messages, along with tags indicating
                 whether these sequences should put objects of the class
                 under test into equivalent states and/or return objects
                 that are in equivalent states. Tests are executed by
                 sending the sequences to objects of the class under
                 test, then invoking a user-supplied
                 equivalence-checking mechanism. This approach allows
                 for substantial automation of many aspects of testing,
                 including test case generation, test driver generation,
                 test execution, and test checking. Experimental
                 prototypes of tools for test generation and test
                 execution are described. The test generation tool
                 requires the availability of an algebraic specification
                 of the abstract data type being tested, but the test
                 execution tool can be used when no formal specification
                 is available. Using the test execution tools, case
                 studies involving execution of tens of thousands of
                 test cases, with various sequence lengths, parameters,
                 and combinations of operations were performed. The
                 relationships among likelihood of detecting an error
                 and sequence length, range of parameters, and relative
                 frequency of various operations were investigated for
                 priority queue and sorted-list implementations having
                 subtle errors. In each case, long sequences tended to
                 be more likely to detect the error, provided that the
                 range of parameters was sufficiently large, and
                 likelihood of detecting an error tended to increase up
                 to a threshold value as the parameter range
                 increased.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Languages; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "abstract data types; algebraic specification;
                 object-oriented programming; software testing",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}; Software --- Software
                 Engineering --- Requirements/Specifications (D.2.1):
                 {\bf Languages}; Software --- Programming Languages ---
                 Language Classifications (D.3.2): {\bf Object-oriented
                 languages}; Software --- Software Engineering ---
                 Testing and Debugging (D.2.5): {\bf Symbolic
                 execution}; Software --- Programming Languages ---
                 Language Constructs and Features (D.3.3): {\bf Abstract
                 data types}",
}
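
%%% An ASTOOT test case, as described above, pairs operation sequences
%%% with a tag saying whether they should leave equivalent states, judged
%%% by a user-supplied check. A toy sketch for a priority queue (all
%%% names invented):
%%%
%%%     import heapq
%%%
%%%     def run(seq):
%%%         q = []
%%%         for op, *args in seq:
%%%             if op == "add":
%%%                 heapq.heappush(q, args[0])
%%%             else:
%%%                 heapq.heappop(q)
%%%         return q
%%%
%%%     def equivalent(q1, q2):          # user-supplied equivalence check
%%%         return sorted(q1) == sorted(q2)
%%%
%%%     s1 = [("add", 2), ("add", 1), ("pop",), ("add", 3)]   # pops the 1
%%%     s2 = [("add", 2), ("add", 3)]    # tagged equivalent to s1
%%%     print(equivalent(run(s1), run(s2)))                   # True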

@Article{Dillon:1994:GIL,
  author =       "L. K. Dillon and G. Kutty and L. E. Moser and P. M.
                 Melliar-Smith and Y. S. Ramakrishna",
  title =        "A graphical interval logic for specifying concurrent
                 systems",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "2",
  pages =        "131--165",
  month =        apr,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-2/p131-dillon/p131-dillon.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-2/p131-dillon/",
  abstract =     "This article describes a graphical interval logic that
                 is the foundation of a tool set supporting formal
                 specification and verification of concurrent software
                 systems. Experience has shown that most software
                 engineers find standard temporal logics difficult to
                 understand and use. The objective of this article is to
                 enable software engineers to specify and reason about
                 temporal properties of concurrent systems more easily
                 by providing them with a logic that has an intuitive
                 graphical representation and with tools that support
                 its use. To illustrate the use of the graphical logic,
                 the article provides some specifications for an
                 elevator system and proves several properties of the
                 specifications. The article also describes the tool set
                 and the implementation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Human Factors; Languages; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "automated proof-checking; concurrent systems; formal
                 specifications; graphical interval logic; temporal
                 logic; timing diagrams; visual languages",
  subject =      "Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Specification techniques};
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Mechanical verification};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Tools};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2)",
}

@Article{Parisi-Presicce:1994:ATC,
  author =       "Francesco Parisi-Presicce and Alfonso Pierantonio",
  title =        "An algebraic theory of class specification",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "2",
  pages =        "166--199",
  month =        apr,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-2/p166-parisi-presicce/p166-parisi-presicce.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-2/p166-parisi-presicce/",
  abstract =     "The notion of class (or object pattern) as defined in
                 most object-oriented languages is formalized using
                 known techniques from algebraic specifications.
                 Inheritance can be viewed as a relation between
                 classes, which suggests how classes can be arranged in
                 hierarchies. The hierarchies contain two kinds of
                 information: on the one hand, they indicate how
                 programs are structured and how code is shared among
                 classes; on the other hand, they give information about
                 compatible assignment rules, which are based on
                 subtyping. In order to distinguish between code
                 sharing, which is related to implementational aspects,
                 and functional specialization, which is connected to
                 the external behavior of objects, we introduce an
                 algebraic specification-based formalism, by which one
                 can specify the behavior of a class and state when a
                 class inherits another one. It is shown that reusing
                 inheritance can be reduced to specialization
                 inheritance with respect to a virtual class. The class
                 model and the two distinct aspects of inheritance allow
                 the definition of {\em clean\/} interconnection
                 mechanisms between classes, leading to new classes that
                 inherit their correctness and semantics from the old
                 classes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "algebraic specifications; inheritance; interconnection
                 mechanisms; modularity",
  subject =      "Software --- Programming Techniques ---
                 Object-oriented Programming (D.1.5); Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1): {\bf Methodologies (e.g., object-oriented,
                 structured)}; Software --- Software Engineering ---
                 Design** (D.2.10): {\bf Methodologies**}; Software ---
                 Programming Languages --- Formal Definitions and Theory
                 (D.3.1); Software --- Programming Languages ---
                 Language Constructs and Features (D.3.3): {\bf Abstract
                 data types}; Software --- Programming Languages ---
                 Language Constructs and Features (D.3.3): {\bf Modules,
                 packages}; Theory of Computation --- Logics and
                 Meanings of Programs --- Semantics of Programming
                 Languages (F.3.2): {\bf Algebraic approaches to
                 semantics}",
}

@Article{Bernhard:1994:RTS,
  author =       "Philip J. Bernhard",
  title =        "A reduced test suite for protocol conformance
                 testing",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "3",
  pages =        "201--220",
  month =        jul,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See comments \cite{Petrenko:1997:CRT}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-3/p201-bernhard/p201-bernhard.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-3/p201-bernhard/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Reliability; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "heuristics",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2); Computer Systems Organization --- Performance
                 of Systems (C.4)",
}

@Article{Cheon:1994:LSI,
  author =       "Yoonsik Cheon and Gary T. Leavens",
  title =        "The {Larch\slash Smalltalk} interface specification
                 language",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "3",
  pages =        "221--253",
  month =        jul,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-3/p221-cheon/p221-cheon.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-3/p221-cheon/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Documentation; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal methods; interface specification;
                 Larch/Smalltalk; Smalltalk; specification inheritance;
                 subtype; verification",
  subject =      "Software --- Programming Languages --- Language
                 Classifications (D.3.2): {\bf Larch}; Software ---
                 Programming Languages --- Language Classifications
                 (D.3.2): {\bf Smalltalk}; Software --- Software
                 Engineering --- Design Tools and Techniques (D.2.2);
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Assertions}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Invariants}; Theory of Computation ---
                 Logics and Meanings of Programs --- Specifying and
                 Verifying and Reasoning about Programs (F.3.1): {\bf
                 Pre- and post-conditions}; Theory of Computation ---
                 Logics and Meanings of Programs --- Specifying and
                 Verifying and Reasoning about Programs (F.3.1): {\bf
                 Specification techniques}",
}

@Article{Jeng:1994:SDT,
  author =       "Bingchiang Jeng and Elaine J. Weyuker",
  title =        "A simplified domain-testing strategy",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "3",
  pages =        "254--270",
  month =        jul,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-3/p254-jeng/p254-jeng.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-3/p254-jeng/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Reliability; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "domain testing; software testing",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5)",
}

@Article{Creveuil:1994:FSD,
  author =       "Christian Creveuil and Gruia-Catalin Roman",
  title =        "Formal specification and design of a message router",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "4",
  pages =        "271--307",
  month =        oct,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-4/p271-creveuil/p271-creveuil.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-4/p271-creveuil/",
  abstract =     "Formal derivation refers to a family of design
                 techniques that entail the development of programs
                 which are guaranteed to be correct by construction.
                 Only limited industrial use of such techniques (e.g.,
                 UNITY-style specification refinement) has been reported
                 in the literature, and there is a great need for
                 methodological developments aimed at facilitating their
                 application to complex problems. This article examines
                 the formal specification and design of a message router
                 in an attempt to identify those methodological elements
                 that are likely to contribute to successful industrial
                 uses of program derivation. Although the message router
                 cannot be characterized as being industrial grade, it
                 is a sophisticated problem that poses significant
                 specification and design challenges--its apparent
                 simplicity is rather deceiving. The main body of the
                 article consists of a complete formal specification of
                 the router and a series of successive refinements that
                 eventually lead to an immediate construction of a
                 correct UNITY program. Each refinement is accompanied
                 by its design rationale and is explained in a manner
                 accessible to a broad audience. We use this example to
                 make the case that program derivation provides a good
                 basis for introducing rigor in the design strategy,
                 regardless of the degrees of formality one is willing
                 to consider.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal methods; program derivation; specification
                 refinement; UNITY",
  subject =      "Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3): {\bf Distributed programming};
                 Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3): {\bf Parallel programming};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Software/Program Verification
                 (D.2.4): {\bf Correctness proofs}; Software ---
                 Software Engineering --- Design** (D.2.10): {\bf
                 Methodologies**}",
}

@Article{Felder:1994:VRT,
  author =       "Miguel Felder and Angelo Morzenti",
  title =        "Validating real-time systems by history-checking
                 {TRIO} specifications",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "4",
  pages =        "308--339",
  month =        oct,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-4/p308-felder/p308-felder.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-4/p308-felder/",
  abstract =     "We emphasize the importance of formal executable
                 specifications in the development of real-time systems,
                 as a means to assess the adequacy of the requirements
                 before a costly development process takes place. TRIO
                 is a first-order temporal logic language for executable
                 specification of real-time systems that deals with time
                 in a quantitative way by providing a metric to indicate
                 distance in time between events and length of time
                 intervals. We summarize the language and its
                 model-parametric semantics. Then we present an
                 algorithm to perform history checking, i.e., to check
                 that a history of the system satisfies the
                 specification. This algorithm can be used as a basis
                 for an effective specification testing tool. The
                 algorithm is described; an estimation of its complexity
                 is provided; and the main functionalities of the tool
                 are presented, together with sample test cases.
                 Finally, we draw conclusions and indicate directions of
                 future research.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "first-order logic; formal specifications;
                 model-theoretic semantics; requirements validation",
  subject =      "Computer Systems Organization --- Special-Purpose and
                 Application-Based Systems (C.3): {\bf Real-time and
                 embedded systems}; Software --- Software Engineering
                 --- Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Software/Program Verification
                 (D.2.4): {\bf Validation}; Software --- Operating
                 Systems --- Organization and Design (D.4.7): {\bf
                 Real-time systems and embedded systems}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Mechanical verification}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Specification techniques}",
}

@Article{Duri:1994:AEE,
  author =       "S. Duri and U. Buy and R. Devarapalli and S. M.
                 Shatz",
  title =        "Application and experimental evaluation of state space
                 reduction methods for deadlock analysis in {Ada}",
  journal =      j-TOSEM,
  volume =       "3",
  number =       "4",
  pages =        "340--380",
  month =        oct,
  year =         "1994",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1994-3-4/p340-duri/p340-duri.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1994-3-4/p340-duri/",
  abstract =     "An emerging challenge for software engineering is the
                 development of the methods and tools to aid design and
                 analysis of concurrent and distributed software. Over
                 the past few years, a number of analysis methods that
                 focus on Ada tasking have been developed. Many of these
                 methods are based on some form of reachability
                 analysis, which has the advantage of being conceptually
                 simple, but the disadvantage of being computationally
                 expensive. We explore the effectiveness of various
                 Petri net-based techniques for the automated deadlock
                 analysis of Ada programs. Our experiments consider a
                 variety of state space reduction methods both
                 individually and in various combinations. The
                 experiments are applied to a number of classical
                 concurrent programs as well as a set of ``real-world''
                 programs. The results indicate that Petri net reduction
                 and reduced state space generation are mutually
                 beneficial techniques, and that combined approaches
                 based on Petri net models are quite effective, compared
                 to alternative analysis approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Ada tasking; automatic analysis; concurrency analysis;
                 deadlock detection; experimental evaluation; state
                 space explosion",
  subject =      "Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3): {\bf Distributed programming};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Computer-aided software
                 engineering (CASE)}; Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2): {\bf Petri
                 nets}; Software --- Software Engineering --- Testing
                 and Debugging (D.2.5): {\bf Debugging aids}",
}

@Article{Binkley:1995:PIL,
  author =       "David Binkley and Susan Horwitz and Thomas Reps",
  title =        "Program integration for languages with procedure
                 calls",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "1",
  pages =        "3--35",
  month =        jan,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-1/p3-binkley/p3-binkley.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-1/p3-binkley/",
  abstract =     "Given a program Base and two variants, A and B, each
                 created by modifying separate copies of Base, the goal
                 of program integration is to determine whether the
                 modifications interfere, and if they do not, to create
                 an integrated program that incorporates both sets of
                 changes as well as the portions of Base preserved in
                 both variants. Text-based integration techniques, such
                 as the one used by the Unix {\em diff3\/} utility, are
                 obviously unsatisfactory because one has no guarantees
                 about how the execution behavior of the integrated
                 program relates to the behaviors of Base, A, and B. The
                 first program integration algorithm to provide such
                 guarantees was developed by Horwitz, Prins, and Reps.
                 However, a limitation of that algorithm is that it only
                 applies to programs written in a restricted
                 language--in particular, the algorithm does not handle
                 programs with procedures. This article describes a
                 generalization of the Horwitz-Prins-Reps algorithm that
                 handles programs that consist of multiple (and possibly
                 mutually recursive) procedures. \par

                 We show that two straightforward generalizations of the
                 Horwitz-Prins-Reps algorithm yield unsatisfactory
                 results. The key issue in developing a satisfactory
                 algorithm is how to take into account different calling
                 contexts when determining what has changed in the
                 variants A and B. Our solution to this problem involves
                 identifying two different kinds of affected components
                 of A and B: those affected regardless of how the
                 procedure is called, and those affected by a changed or
                 new calling context. The algorithm makes use of
                 interprocedural program slicing to identify these
                 components, as well as components in Base, A, and B
                 with the same behavior.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design; Languages; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "control dependence; data dependence; data-flow
                 analysis; flow-insensitive summary information; program
                 dependence graph; program slicing; semantics-based
                 program integration",
  subject =      "Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7): {\bf Version
                 control}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Restructuring, reverse engineering, and
                 reengineering}; Software --- Software Engineering ---
                 Management (D.2.9); Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2): {\bf
                 Programmer workbench**}; Software --- Software
                 Engineering --- Coding Tools and Techniques (D.2.3):
                 {\bf Program editors}; Software --- Programming
                 Languages --- Language Constructs and Features (D.3.3);
                 Software --- Programming Languages --- Processors
                 (D.3.4): {\bf Compilers}",
}

@Article{Howden:1995:STA,
  author =       "W. E. Howden and Yudong Huang",
  title =        "Software trustability analysis",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "1",
  pages =        "36--64",
  month =        jan,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-1/p36-howden/p36-howden.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-1/p36-howden/",
  abstract =     "A measure of software dependability called
                 trustability is described. A program p has trustability
                 T if we are at least T confident that p is free of
                 faults. Trustability measurement depends on
                 detectability. The detectability of a method is the
                 probability that it will detect faults, when there are
                 faults present. Detectability research can be used to
                 characterize conditions under which one testing and
                 analysis method is more effective than another. Several
                 detectability results that were only previously
                 described informally, and illustrated by example, are
                 proved. Several new detectability results are also
                 proved. The trustability model characterizes the kind
                 of information that is needed to justify a given level
                 of trustability. When the required information is
                 available, the trustability approach can be used to
                 determine strategies in which methods are combined for
                 maximum effectiveness. It can be used to determine the
                 minimum amount of resources needed to guarantee a
                 required degree of trustability, and the maximum
                 trustability that is achievable with a given amount of
                 resources. Theorems proving several optimization
                 results are given. Applications of the trustability
                 model are discussed. Methods for the derivation of
                 detectability factors, the relationship between
                 trustability and operational reliability, and the
                 relationship between the software development process
                 and trustability are described.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "analysis; dependability; detectability; failure
                 density; statistical; testability; testing;
                 trustability",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Theory of Computation --- Logics and
                 Meanings of Programs --- Specifying and Verifying and
                 Reasoning about Programs (F.3.1)",
}

@Article{Young:1995:CAT,
  author =       "Michal Young and Richard N. Taylor and David L. Levine
                 and Kari A. Nies and Debra Brodbeck",
  title =        "A concurrency analysis tool suite for {Ada} programs:
                 rationale, design, and preliminary experience",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "1",
  pages =        "65--106",
  month =        jan,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-1/p65-young/p65-young.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-1/p65-young/",
  abstract =     "Cats (Concurrency Analysis Tool Suite) is designed to
                 satisfy several criteria: it must analyze
                 implementation-level Ada source code and check
                 user-specified conditions associated with program
                 source code; it must be modularized in a fashion that
                 supports flexible composition with other tool
                 components, including integration with a variety of
                 testing and analysis techniques; and its performance
                 and capacity must be sufficient for analysis of real
                 application programs. Meeting these objectives together
                 is significantly more difficult than meeting any of
                 them alone. We describe the design and rationale of
                 Cats and report experience with an implementation. The
                 issues addressed here are primarily practical concerns
                 for modularizing and integrating tools for analysis of
                 actual source programs. We also report successful
                 application of Cats to major subsystems of a (nontoy)
                 highly concurrent user interface system.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Ada; concurrency; software development environments;
                 static analysis; tool integration",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2); Software --- Software Engineering
                 --- Testing and Debugging (D.2.5); Software ---
                 Software Engineering --- Programming Environments
                 (D.2.6); Software --- Programming Languages ---
                 Language Classifications (D.3.2): {\bf Concurrent,
                 distributed, and parallel languages}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Concurrent programming
                 structures}",
}

@Article{Jackson:1995:ADB,
  author =       "Daniel Jackson",
  title =        "Aspect: detecting bugs with abstract dependences",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "2",
  pages =        "109--145",
  month =        apr,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-2/p109-jackson/p109-jackson.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-2/p109-jackson/",
  abstract =     "Aspect is a static analysis technique for detecting
                 bugs in imperative programs, consisting of an
                 annotation language and a checking tool. Like a type
                 declaration, an Aspect annotation of a procedure is a
                 kind of declarative, partial specification that can be
                 checked efficiently in a modular fashion. But instead
                 of constraining the types of arguments and results,
                 Aspect specifications assert dependences that should
                 hold between inputs and outputs. The checker uses a
                 simple dependence analysis to check code against
                 annotations and can find bugs automatically that are
                 not detectable by other static means, especially errors
                 of omission, which are common, but resistant to type
                 checking. This article explains the basic scheme and
                 shows how it is elaborated to handle data abstraction
                 and aliasing.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Documentation; Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "dataflow dependences; partial specification; partial
                 verification",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Modules and interfaces};
                 Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Assertion checkers};
                 Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Symbolic execution}; Software
                 --- Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Abstract data types}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Assertions}; Theory of Computation ---
                 Logics and Meanings of Programs --- Specifying and
                 Verifying and Reasoning about Programs (F.3.1): {\bf
                 Mechanical verification}; Theory of Computation ---
                 Logics and Meanings of Programs --- Specifying and
                 Verifying and Reasoning about Programs (F.3.1): {\bf
                 Pre- and post-conditions}",
}

@Article{Zaremski:1995:SMT,
  author =       "Amy Moormann Zaremski and Jeannette M. Wing",
  title =        "Signature matching: a tool for using software
                 libraries",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "2",
  pages =        "146--170",
  month =        apr,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-2/p146-zaremski/p146-zaremski.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-2/p146-zaremski/",
  abstract =     "{\em Signature matching\/} is a method for organizing,
                 navigating through, and retrieving from software
                 libraries. We consider two kinds of software library
                 components--functions and modules--and hence two kinds
                 of matching--function matching and module matching. The
                 signature of a function is simply its type; the
                 signature of a module is a multiset of user-defined
                 types and a multiset of function signatures. For both
                 functions and modules, we consider not just {\em
                 exact\/} match but also various flavors of {\em
                 relaxed\/} match. We describe various applications of
                 signature matching as a tool for using software
                 libraries, inspired by the use of our implementation of
                 a function signature matcher written in Standard ML.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "signature matching; software retrieval",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Modules and interfaces};
                 Software --- Software Engineering --- Miscellaneous
                 (D.2.m): {\bf Reusable software**}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Data types and structures};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Software libraries}",
}

@Article{Pezze:1995:GMR,
  author =       "Mauro Pezz{\`e} and Richard N. Taylor and Michal
                 Young",
  title =        "Graph models for reachability analysis of concurrent
                 programs",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "2",
  pages =        "171--213",
  month =        apr,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-2/p171-pezze/p171-pezze.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-2/p171-pezze/",
  abstract =     "The problem of analyzing concurrent systems has been
                 investigated by many researchers, and several solutions
                 have been proposed. Among the proposed techniques,
                 reachability analysis--systematic enumeration of
                 reachable states in a finite-state model--is attractive
                 because it is conceptually simple and relatively
                 straightforward to automate and can be used in
                 conjunction with model-checking procedures to check for
                 application-specific as well as general properties.
                 This article shows that the nature of the translation
                 from source code to a modeling formalism is of greater
                 practical importance than the underlying formalism.
                 Features identified as pragmatically important are the
                 representation of internal choice, selection of a
                 dynamic or static matching rule, and the ease of
                 applying reductions. Since combinatorial explosion is
                 the primary impediment to application of reachability
                 analysis, a particular concern in choosing a model is
                 facilitating divide-and-conquer analysis of large
                 programs. Recently, much interest in finite-state
                 verification systems has centered on algebraic theories
                 of concurrency. Algebraic structure can be used to
                 decompose reachability analysis based on a flowgraph
                 model. The semantic equivalence of graph and Petri
                 net-based models suggests that one ought to be able to
                 apply a similar strategy for decomposing Petri nets. We
                 describe how category-theoretic treatments of Petri
                 nets provide a basis for decomposition of Petri net
                 reachability analysis.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Ada tasking; process algebra; static analysis",
  subject =      "Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3); Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2): {\bf Petri
                 nets}; Software --- Software Engineering --- Testing
                 and Debugging (D.2.5): {\bf Debugging aids}; Software
                 --- Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Concurrent programming
                 structures}; Theory of Computation --- Logics and
                 Meanings of Programs --- Specifying and Verifying and
                 Reasoning about Programs (F.3.1): {\bf Mechanical
                 verification}",
}

@Article{Brett:1995:CCS,
  author =       "Bevin R. Brett",
  title =        "Comments on {``The cost of selective recompilation and
                 environment processing''}",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "2",
  pages =        "214--216",
  month =        apr,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See \cite{Adams:1994:CSR,Tichy:1995:AR}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-2/p214-brett/p214-brett.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-2/p214-brett/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Measurement",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Software --- Programming Languages --- Processors
                 (D.3.4): {\bf Compilers}; Software --- Software
                 Engineering --- Programming Environments (D.2.6);
                 Software --- Software Engineering --- Management
                 (D.2.9): {\bf Software configuration management};
                 Software --- Programming Languages --- Processors
                 (D.3.4)",
}

@Article{Tichy:1995:AR,
  author =       "Walter Tichy and Rolf Adams and Annette Weinert",
  title =        "Authors' response",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "2",
  pages =        "217--219",
  month =        apr,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See \cite{Adams:1994:CSR,Brett:1995:CCS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-2/p217-tichy/p217-tichy.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-2/p217-tichy/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Measurement",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Software --- Programming Languages --- Processors
                 (D.3.4): {\bf Compilers}; Software --- Software
                 Engineering --- Programming Environments (D.2.6);
                 Software --- Software Engineering --- Management
                 (D.2.9): {\bf Software configuration management};
                 Software --- Programming Languages --- Processors
                 (D.3.4)",
}

@Article{Sutton:1995:ALS,
  author =       "Stanley M. Sutton and Dennis Heimbigner and Leon J.
                 Osterweil",
  title =        "{APPL/A}: a language for software process
                 programming",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "3",
  pages =        "221--286",
  month =        jul,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-3/p221-sutton/p221-sutton.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-3/p221-sutton/",
  abstract =     "Software process programming is the coding of software
                 processes in executable programming languages. Process
                 programming offers many potential benefits, but their
                 realization has been hampered by a lack of experience
                 in the design and use of process programming languages.
                 APPL/A is a prototype software process programming
                 language developed to help gain this experience. It is
                 intended for the coding of programs to represent and
                 support software processes including process, product,
                 and project management. APPL/A is defined as an
                 extension to Ada, to which it adds persistent
                 programmable relations, concurrent triggers on relation
                 operations (for reactive control), optionally and
                 dynamically enforceable predicates on relations (which
                 may serve as constraints), and composite statements
                 that provide alternative combinations of
                 serializability, atomicity, and consistency enforcement
                 (for programming high-level transactions). APPL/A has
                 been used to code engineering-oriented applications,
                 like requirements specification and design, as well as
                 management-related activities, such as personnel
                 assignment, task scheduling, and project monitoring.
                 APPL/A has also enabled us to experiment with process
                 program design techniques and architectures, including
                 process state reification, intermittent (or persistent)
                 processes, reflexive and metaprocesses, and
                 multiple-process systems. Our ability to address a wide
                 range of software processes and process characteristics
                 indicates that the APPL/A constructs represent
                 important and general capabilities for software process
                 programming. \par

                 -- {\em Authors' Abstract\/}",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "consistency management; multiparadigm programming
                 languages; software process programming; transaction
                 management",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2); Software --- Programming Languages
                 --- Language Classifications (D.3.2): {\bf Ada};
                 Software --- Programming Languages --- Language
                 Classifications (D.3.2): {\bf APPL}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3); Information Systems --- Database
                 Management --- Languages (H.2.3): {\bf Database
                 (persistent) programming languages}",
}

@Article{Callison:1995:TSO,
  author =       "H. Rebecca Callison",
  title =        "A time-sensitive object model for real-time systems",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "3",
  pages =        "287--317",
  month =        jul,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-3/p287-callison/p287-callison.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-3/p287-callison/",
  abstract =     "Process-oriented models for real-time systems focus on
                 the timing constraints of {\em processes}, a focus that
                 can adversely affect resulting designs. Data
                 dependencies between processes create scheduling
                 interactions that limit the times at which processes
                 may execute. Processes are then designed to fit
                 available windows in the overall system schedule.
                 ``Fitting in'' frequently involves fragmenting
                 processes to fit scheduling windows and/or designing
                 program and data structures for speed rather than for
                 program comprehension. The result is often a system
                 with very sensitive timing that is hard to understand
                 and maintain. As an alternative to process-oriented
                 design, we present time-sensitive objects: a
                 data-oriented model for real-time systems. The
                 time-sensitive object (TSO) model structures systems as
                 time-constrained data, rather than time-constrained
                 processing. Object values are extended to object
                 histories in which a sequence of time-constrained
                 values describes the evolution of the object over time.
                 Systems comprise a set of objects and their
                 dependencies. The TSO model describes the effects of
                 object operations and the propagation of change among
                 related objects. Periodic objects, a class of objects
                 within the TSO model, are described in detail in this
                 article and compared with traditional periodic
                 processes. Advantages of time-sensitive objects are
                 identified, including greater scheduling independence
                 when processes have data dependencies, more opportunity
                 for concurrency, and greater inherent capability for
                 detection of and tolerance to timing errors.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Performance",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "concurrency; fault tolerance; object models;
                 programming techniques; real-time processing models;
                 timing constraints",
  subject =      "Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3); Software --- Software Engineering
                 --- Design** (D.2.10); Software --- Operating Systems
                 --- Reliability (D.4.5): {\bf Fault-tolerance};
                 Software --- Operating Systems --- Organization and
                 Design (D.4.7): {\bf Real-time systems and embedded
                 systems}",
}

@Article{Abowd:1995:FSU,
  author =       "Gregory D. Abowd and Robert Allen and David Garlan",
  title =        "Formalizing style to understand descriptions of
                 software architecture",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "4",
  pages =        "319--364",
  month =        oct,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-4/p319-abowd/p319-abowd.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-4/p319-abowd/",
  abstract =     "The software architecture of most systems is usually
                 described informally and diagrammatically by means of
                 boxes and lines. In order for these descriptions to be
                 meaningful, the diagrams are understood by interpreting
                 the boxes and lines in specific, conventionalized ways.
                 The informal, imprecise nature of these interpretations
                 has a number of limitations. In this article we
                 consider these conventionalized interpretations as
                 architectural styles and provide a formal framework for
                 their uniform definition. In addition to providing a
                 template for precisely defining new architectural
                 styles, this framework allows for analysis within and
                 between different architectural styles.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design; Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "software architecture; Z notation",
  subject =      "Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Specification techniques};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Z}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2): {\bf
                 Denotational semantics}; Software --- Software
                 Engineering --- Design Tools and Techniques (D.2.2);
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1): {\bf Languages}; Software --- Software
                 Engineering --- Design Tools and Techniques (D.2.2):
                 {\bf Modules and interfaces}",
}

@Article{Jackson:1995:SZS,
  author =       "Daniel Jackson",
  title =        "Structuring {Z} specifications with views",
  journal =      j-TOSEM,
  volume =       "4",
  number =       "4",
  pages =        "365--389",
  month =        oct,
  year =         "1995",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1995-4-4/p365-jackson/p365-jackson.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1995-4-4/p365-jackson/",
  abstract =     "A view is a partial specification of a program,
                 consisting of a state space and a set of operations. A
                 full specification is obtained by composing several
                 views, linking them through their states (by asserting
                 invariants across views) and through their operations
                 (by defining external operations as combinations of
                 operations from different views). By encouraging
                 multiple representations of the program's state, view
                 structuring lends clarity and terseness to the
                 specification of operations. And by separating
                 different aspects of functionality, it brings
                 modularity at the grossest level of organization, so
                 that specifications can accommodate change more
                 gracefully. View structuring in Z is demonstrated with
                 a few small examples. Both the features of Z that lend
                 themselves to view structuring and those that are a
                 hindrance are discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal specification; implicit definition; views; Z",
  subject =      "Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Specification techniques};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Design** (D.2.10): {\bf
                 Representation**}; Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2): {\bf Modules
                 and interfaces}; Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Z}",
}

@Article{vandenBrand:1996:GFC,
  author =       "Mark van den Brand and Eelco Visser",
  title =        "Generation of formatters for context-free languages",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "1",
  pages =        "1--41",
  month =        jan,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 31 06:33:29 2003",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-1/p1-van\_den\_brand/p1-van\_den\_brand.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-1/p1-van\_den\_brand/;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-1/p1-van_den_brand/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Devanbu:1996:GTA,
  author =       "Premkumar T. Devanbu and David S. Rosenblum and
                 Alexander L. Wolf",
  title =        "Generating testing and analysis tools with {Aria}",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "1",
  pages =        "42--62",
  month =        jan,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-1/p42-devanbu/p42-devanbu.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-1/p42-devanbu/",
  abstract =     "Many software testing and analysis tools manipulate
                 graph representations of programs, such as abstract
                 syntax trees or abstract semantics graphs. Handcrafting
                 such tools in conventional programming languages can be
                 difficult, error prone, and time consuming. Our
                 approach is to use application generators targeted for
                 the domain of graph-representation-based testing and
                 analysis tools. Moreover, we generate the generators
                 themselves, so that the development of tools based on
                 different languages and/or representations can also be
                 supported better. In this article we report on our
                 experiences in developing and using a system called
                 Aria that generates testing and analysis tools based on
                 an abstract semantics graph representation for C and
                 C++ called Reprise. Aria itself was generated by the
                 Genoa system. We demonstrate the utility of Aria and,
                 thereby, the power of our approach, by showing Aria's
                 use in the development of a number of useful testing
                 and analysis tools.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Design; Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "application generators; Aria; Genoa; program
                 dependence graphs; program representations; Reprise;
                 software analysis; software testing; tools",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Software libraries}; Software
                 --- Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Control structures}; Software
                 --- Programming Languages --- Processors (D.3.4): {\bf
                 Code generation}; Data --- Data Structures (E.1): {\bf
                 Graphs and networks}; Software --- Programming
                 Languages --- Language Constructs and Features (D.3.3):
                 {\bf Data types and structures}; Software ---
                 Programming Languages --- Processors (D.3.4): {\bf
                 Parsing}; Software --- Software Engineering --- Metrics
                 (D.2.8): {\bf Complexity measures}; Software ---
                 Software Engineering --- Testing and Debugging
                 (D.2.5)",
}

@Article{Ferguson:1996:CAS,
  author =       "Roger Ferguson and Bogdan Korel",
  title =        "The chaining approach for software test data
                 generation",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "1",
  pages =        "63--86",
  month =        jan,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-1/p63-ferguson/p63-ferguson.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-1/p63-ferguson/",
  abstract =     "Software testing is very labor intensive and expensive
                 and accounts for a significant portion of software
                 system development cost. If the testing process could
                 be automated, the cost of developing software could be
                 significantly reduced. Test data generation in program
                 testing is the process of identifying a set of test
                 data that satisfies a selected testing criterion, such
                 as statement coverage and branch coverage. In this
                 article we present a {\em chaining approach\/} for
                 automated software test data generation which builds on
                 the current theory of execution-oriented test data
                 generation. In the chaining approach, test data are
                 derived based on the actual execution of the program
                 under test. For many programs, the execution of the
                 selected statement may require prior execution of some
                 other statements. The existing methods of test data
                 generation may not efficiently generate test data for
                 these types of programs because they only use control
                 flow information of a program during the search
                 process. The chaining approach uses data dependence
                 analysis to guide the search process, i.e., data
                 dependence analysis automatically identifies statements
                 that affect the execution of the selected statement.
                 The chaining approach uses these statements to form a
                 sequence of statements that is to be executed prior to
                 the execution of the selected statement. The
                 experiments have shown that the chaining approach may
                 significantly improve the chances of finding test data
                 as compared to the existing methods of automated test
                 data generation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Measurement; Performance",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "data dependency; dynamic analysis; heuristics; program
                 execution",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}",
}

@Article{Weyuker:1996:UFC,
  author =       "Elaine J. Weyuker",
  title =        "Using failure cost information for testing and
                 reliability assessment",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "2",
  pages =        "87--98",
  month =        apr,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-2/p87-weyuker/p87-weyuker.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-2/p87-weyuker/",
  abstract =     "A technique for incorporating failure cost information
                 into algorithms designed to automatically generate
                 software-load-testing suites is presented. A previously
                 introduced reliability measure is also modified to
                 incorporate this cost information. Examples are
                 presented to show the usefulness of including cost
                 information when testing or assessing software.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Measurement; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "failure cost; software testing; test case selection",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Software --- Software Engineering
                 --- General (D.2.0)",
}

@Article{Offutt:1996:EDS,
  author =       "A. Jefferson Offutt and Ammei Lee and Gregg Rothermel
                 and Roland H. Untch and Christian Zapf",
  title =        "An experimental determination of sufficient mutant
                 operators",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "2",
  pages =        "99--118",
  month =        apr,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-2/p99-offutt/p99-offutt.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-2/p99-offutt/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Measurement; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5)",
}

@Article{Bergadano:1996:TMI,
  author =       "Francesco Bergadano and Daniele Gunetti",
  title =        "Testing by means of inductive program learning",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "2",
  pages =        "119--145",
  month =        apr,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-2/p119-bergadano/p119-bergadano.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-2/p119-bergadano/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "program induction by examples",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Computing Methodologies ---
                 Artificial Intelligence --- Learning (I.2.6)",
}

@Article{Snelting:1996:RCB,
  author =       "Gregor Snelting",
  title =        "Reengineering of configurations based on mathematical
                 concept analysis",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "2",
  pages =        "146--189",
  month =        apr,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-2/p146-snelting/p146-snelting.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-2/p146-snelting/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Management; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "concept analysis; concept lattices",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6): {\bf Interactive environments};
                 Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7); Software ---
                 Software Engineering --- Management (D.2.9): {\bf
                 Software configuration management}",
}

@Article{Cugola:1996:FFI,
  author =       "Gianpaolo Cugola and Elisabetta {Di Nitto} and Alfonso
                 Fuggetta and Carlo Ghezzi",
  title =        "A framework for formalizing inconsistencies and
                 deviations in human-centered systems",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "3",
  pages =        "191--230",
  month =        jul,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-3/p191-cugola/p191-cugola.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-3/p191-cugola/",
  abstract =     "Most modern business activities are carried out by a
                 combination of computerized tools and human agents.
                 Typical examples are engineering design activities,
                 office procedures, and banking systems. All these {\em
                 human-centered systems\/} are characterized by the
                 interaction among people, and between people and
                 computerized tools. This interaction defines a process,
                 whose effectiveness is essential to ensure the quality
                 of the delivered products and/or services. To support
                 these systems, process-centered environments and
                 workflow management systems have been recently
                 developed. They can be collectively identified with the
                 term {\em process technology}. This technology is based
                 on the explicit definition of the process to be
                 followed (the {\em process model\/}). The model
                 specifies the kind of support that has to be provided
                 to human agents. An essential property that process
                 technology must exhibit is the ability of tolerating,
                 controlling, and supporting {\em deviations\/} and {\em
                 inconsistencies\/} of the real-world behaviors with
                 respect to the process model. This is necessary to
                 provide consistent and effective support to the
                 human-centered system, still maintaining a high degree
                 of flexibility and adaptability to the evolving needs,
                 preferences, and expertise of the human agents. This
                 article presents a formal framework to characterize the
                 interaction between a human-centered system and its
                 automated support. It does not aim at introducing a new
                 language or system to describe processes. Rather, it
                 aims at identifying the basic properties and features
                 that make it possible to formally define the concepts
                 of inconsistency and deviation. This formal framework
                 can then be used to compare existing solutions and
                 guide future research work.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Human Factors; Management; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "business processes; deviation; formal model;
                 human-centered systems; inconsistency; software
                 processes",
  subject =      "Information Systems --- Models and Principles ---
                 User/Machine Systems (H.1.2); Software --- Software
                 Engineering --- Programming Environments (D.2.6);
                 Computing Milieux --- Management of Computing and
                 Information Systems --- Software Management (K.6.3)",
}

@Article{Heitmeyer:1996:ACC,
  author =       "Constance L. Heitmeyer and Ralph D. Jeffords and Bruce
                 G. Labaw",
  title =        "Automated consistency checking of requirements
                 specifications",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "3",
  pages =        "231--261",
  month =        jul,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-3/p231-heitmeyer/p231-heitmeyer.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-3/p231-heitmeyer/",
  abstract =     "This article describes a formal analysis technique,
                 called {\em consistency checking}, for automatic
                 detection of errors, such as type errors,
                 nondeterminism, missing cases, and circular
                 definitions, in requirements specifications. The
                 technique is designed to analyze requirements
                 specifications expressed in the SCR (Software Cost
                 Reduction) tabular notation. As background, the SCR
                 approach to specifying requirements is reviewed. To
                 provide a formal semantics for the SCR notation and a
                 foundation for consistency checking, a formal
                 requirements model is introduced; the model represents
                 a software system as a finite-state automaton which
                 produces externally visible outputs in response to
                 changes in monitored environmental quantities. Results
                 of two experiments are presented which evaluated the
                 utility and scalability of our technique for
                 consistency checking in a real-world avionics
                 application. The role of consistency checking during
                 the requirements phase of software development is
                 discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Management; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "application-independent properties; consistency
                 checking; formal requirements modeling; software cost
                 reduction methodology; tabular notations",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4); Software --- Software Engineering
                 --- Requirements/Specifications (D.2.1); Computing
                 Milieux --- Management of Computing and Information
                 Systems --- Software Management (K.6.3); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2)",
}

@Article{Murphy:1996:LLS,
  author =       "Gail C. Murphy and David Notkin",
  title =        "Lightweight lexical source model extraction",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "3",
  pages =        "262--292",
  month =        jul,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-3/p262-murphy/p262-murphy.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-3/p262-murphy/",
  abstract =     "Software engineers maintaining an existing software
                 system often depend on the mechanized extraction of
                 information from system artifacts. Some useful kinds of
                 information--source models--are well known: call
                 graphs, file dependences, etc. Predicting every kind of
                 source model that a software engineer may need is
                 impossible. We have developed a lightweight approach
                 for generating flexible and tolerant source model
                 extractors from lexical specifications. The approach is
                 lightweight in that the specifications are relatively
                 small and easy to write. It is flexible in that there
                 are few constraints on the kinds of artifacts from
                 which source models are extracted (e.g., we can extract
                 from source code, structured data files, documentation,
                 etc.). It is tolerant in that there are few constraints
                 on the condition of the artifacts. For example, we can
                 extract from source that cannot necessarily be
                 compiled. Our approach extends the kinds of source
                 models that can be easily produced from lexical
                 information while avoiding the constraints and
                 brittleness of most parser-based approaches. We have
                 developed tools to support this approach and applied
                 the tools to the extraction of a number of different
                 source models (file dependences, event interactions,
                 call graphs) from a variety of system artifacts (C,
                 C++, CLOS, Eiffel, TCL, structured data). We discuss
                 our approach and describe its application to extract
                 source models not available using existing systems; for
                 example, we compute the implicitly-invokes relation
                 over Field tools. We compare and contrast our approach
                 to the conventional lexical and syntactic approaches of
                 generating source models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Experimentation; Languages; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "lexical analysis; lexing; reverse engineering; scanner
                 generation; scanning; software maintenance; source code
                 analysis; source model; static analysis",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2); Software --- Programming Languages
                 --- Processors (D.3.4); Software --- Software
                 Engineering --- General (D.2.0)",
}

@Article{Harel:1996:SSS,
  author =       "David Harel and Amnon Naamad",
  title =        "The {STATEMATE} semantics of statecharts",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "4",
  pages =        "293--333",
  month =        oct,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-4/p293-harel/p293-harel.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-4/p293-harel/",
  abstract =     "We describe the semantics of statecharts as
                 implemented in the STATEMATE system. This was the first
                 executable semantics defined for the language and has
                 been in use for almost a decade. In terms of the
                 controversy around whether changes made in a given step
                 should take effect in the current step or in the next
                 one, this semantics adopts the latter approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "behavioral modeling; reactive system; semantics;
                 statechart; STATEMATE",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2); Software --- Programming Languages
                 --- Formal Definitions and Theory (D.3.1): {\bf
                 Semantics}; Theory of Computation --- Logics and
                 Meanings of Programs --- Semantics of Programming
                 Languages (F.3.2)",
}

@Article{Cheung:1996:CCC,
  author =       "Shing Chi Cheung and Jeff Kramer",
  title =        "Context constraints for compositional reachability
                 analysis",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "4",
  pages =        "334--377",
  month =        oct,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-4/p334-cheung/p334-cheung.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-4/p334-cheung/",
  abstract =     "Behavior analysis of complex distributed systems has
                 led to the search for enhanced reachability analysis
                 techniques which support modularity and which control
                 the state explosion problem. While modularity has been
                 achieved, state explosion is still a problem. Indeed,
                 this problem may even be exacerbated, as a locally
                 minimized subsystem may contain many states and
                 transitions forbidden by its environment or context.
                 Context constraints, specified as interface processes,
                 are restrictions imposed by the environment on
                 subsystem behavior. Recent research has suggested that
                 the state explosion problem can be effectively
                 controlled if context constraints are incorporated in
                 compositional reachability analysis (CRA). Although
                 theoretically very promising, the approach has rarely
                 been used in practice because it generally requires a
                 more complex computational model and does not contain a
                 mechanism to derive context constraints automatically.
                 This article presents a technique to automate the
                 approach while using a similar computational model to
                 that of CRA. Context constraints are derived
                 automatically, based on a set of sufficient conditions
                 for these constraints to be transparently included when
                 building reachability graphs. As a result, the global
                 reachability graph generated using the derived
                 constraints is shown to be observationally equivalent
                 to that generated by CRA without the inclusion of
                 context constraints. Constraints can also be specified
                 explicitly by users, based on their application
                 knowledge. Erroneous constraints which contravene
                 transparency can be identified together with an
                 indication of the error sources. User-specified
                 constraints can be combined with those generated
                 automatically. The technique is illustrated using a
                 client/server system and other examples.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Reliability; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "compositional techniques; concurrency; context
                 constraints; distributed systems; labeled transition
                 systems; reachability analysis; state space reduction;
                 static analysis; validation",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2); Software --- Programming Languages ---
                 Language Classifications (D.3.2): {\bf Concurrent,
                 distributed, and parallel languages}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Concurrent programming
                 structures}; Theory of Computation --- Logics and
                 Meanings of Programs --- Specifying and Verifying and
                 Reasoning about Programs (F.3.1)",
}

@Article{Barrett:1996:FEB,
  author =       "Daniel J. Barrett and Lori A. Clarke and Peri L. Tarr
                 and Alexander E. Wise",
  title =        "A framework for event-based software integration",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "4",
  pages =        "378--421",
  month =        oct,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-4/p378-barrett/p378-barrett.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-4/p378-barrett/",
  abstract =     "Although event-based software integration is one of
                 the most prevalent approaches to loose integration, no
                 consistent model for describing it exists. As a result,
                 there is no uniform way to discuss event-based
                 integration, compare approaches and implementations,
                 specify new event-based approaches, or match user
                 requirements with the capabilities of event-based
                 integration systems. We attempt to address these
                 shortcomings by specifying a {\em generic framework for
                 event-based integration}, the EBI framework, that
                 provides a flexible, object-oriented model for
                 discussing and comparing event-based integration
                 approaches. The EBI framework can model dynamic and
                 static specification, composition, and decomposition
                 and can be instantiated to describe the features of
                 most common event-based integration approaches. We
                 demonstrate how to use the framework as a reference
                 model by comparing and contrasting three well-known
                 integration systems: FIELD, Polylith, and CORBA.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "control integration; CORBA; event-based systems;
                 FIELD; interoperability; Polylith; reference model;
                 software integration",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2)",
}

@Article{Anonymous:1996:AI,
  author =       "Anonymous",
  title =        "Author Index",
  journal =      j-TOSEM,
  volume =       "5",
  number =       "4",
  pages =        "422--423",
  month =        oct,
  year =         "1996",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:05:47 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1996-5-4/p422-authorindex/p422-authorindex.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-4/p422-author\_index/;
                 http://www.acm.org/pubs/citations/journals/tosem/1996-5-4/p422-author_index/",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Zave:1997:FDC,
  author =       "Pamela Zave and Michael Jackson",
  title =        "Four dark corners of requirements engineering",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "1",
  pages =        "1--30",
  month =        jan,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-1/p1-zave/p1-zave.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-1/p1-zave/",
  abstract =     "Research in requirements engineering has produced an
                 extensive body of knowledge, but there are four areas
                 in which the foundation of the discipline seems weak or
                 obscure. This article shines some light in the ``four
                 dark corners,'' exposing problems and proposing
                 solutions. We show that all descriptions involved in
                 requirements engineering should be descriptions of the
                 environment. We show that certain control information
                 is necessary for sound requirements engineering, and we
                 explain the close association between domain knowledge
                 and refinement of requirements. Together these
                 conclusions explain the precise nature of requirements,
                 specifications, and domain knowledge, as well as the
                 precise nature of the relationships among them. They
                 establish minimum standards for what information should
                 be represented in a requirements language. They also
                 make it possible to determine exactly what it means for
                 requirements engineering to be successfully
                 completed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "control of actions; domain knowledge; implementation
                 bias; refinement of requirements",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}",
}

@Article{Moser:1997:GED,
  author =       "L. E. Moser and Y. S. Ramakrishna and G. Kutty and P.
                 M. Melliar-Smith and L. K. Dillon",
  title =        "A graphical environment for the design of concurrent
                 real-time systems",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "1",
  pages =        "31--79",
  month =        jan,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-1/p31-moser/p31-moser.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-1/p31-moser/",
  abstract =     "Concurrent real-time systems are among the most
                 difficult systems to design because of the many
                 possible interleavings of events and because of the
                 timing requirements that must be satisfied. We have
                 developed a graphical environment based on Real-Time
                 Graphical Interval Logic (RTGIL) for specifying and
                 reasoning about the designs of concurrent real-time
                 systems. Specifications in the logic have an intuitive
                 graphical representation that resembles the timing
                 diagrams drawn by software and hardware engineers, with
                 real-time constraints that bound the durations of
                 intervals. The syntax-directed editor of the RTGIL
                 environment enables the user to compose and edit
                 graphical formulas on a workstation display; the
                 automated theorem prover mechanically checks the
                 validity of proofs in the logic; and the database and
                 proof manager tracks proof dependencies and allows
                 formulas to be stored and retrieved. This article
                 describes the logic, methodology, and tools that
                 comprise the prototype RTGIL environment and
                 illustrates the use of the environment with an example
                 application.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "automated deduction; concurrent systems; formal
                 specification and verification; graphical user
                 interface; real-time systems; temporal logic",
  subject =      "Computer Systems Organization --- Special-Purpose and
                 Application-Based Systems (C.3): {\bf Real-time and
                 embedded systems}; Software --- Software Engineering
                 --- Requirements/Specifications (D.2.1): {\bf
                 Methodologies (e.g., object-oriented, structured)};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Tools};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Computer-aided software
                 engineering (CASE)}; Software --- Software Engineering
                 --- Design** (D.2.10): {\bf Methodologies**}; Software
                 --- Software Engineering --- Design** (D.2.10): {\bf
                 Representation**}; Theory of Computation ---
                 Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Mechanical theorem
                 proving}; Theory of Computation --- Mathematical Logic
                 and Formal Languages --- Formal Languages (F.4.3): {\bf
                 Decision problems}",
}

@Article{Dillon:1997:TDT,
  author =       "Laura K. Dillon",
  title =        "Task dependence and termination in {Ada}",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "1",
  pages =        "80--110",
  month =        jan,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-1/p80-dillon/p80-dillon.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-1/p80-dillon/",
  abstract =     "This article analyzes the semantics of task dependence
                 and termination in Ada. We use a contour model of Ada
                 tasking in examining the implications of and possible
                 motivation for the rules that determine when procedures
                 and tasks terminate during execution of an Ada program.
                 The termination rules prevent the data that belong to
                 run-time instances of scope units from being
                 deallocated prematurely, but they are unnecessarily
                 conservative in this regard. For task instances that
                 are created by invoking a storage allocator, we show
                 that the conservative termination policy allows heap
                 storage to be managed more efficiently than a less
                 conservative policy. The article also examines the
                 manner in which the termination rules affect the
                 synchronization of concurrent tasks. Master-slave and
                 client-server applications are considered. We show that
                 the rules for distributed termination of concurrent
                 tasks guarantee that a task terminates only if it can
                 no longer affect the outcome of an execution. The
                 article is meant to give programmers a better
                 understanding of Ada tasking and to help language
                 designers assess the strengths and weaknesses of the
                 termination model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Ada tasking; distributed termination; master/dependent
                 relation; task termination; tasking execution model",
  subject =      "Software --- Programming Languages --- Language
                 Constructs and Features (D.3.3): {\bf Concurrent
                 programming structures}; Software --- Programming
                 Languages --- Language Classifications (D.3.2): {\bf
                 Ada}",
}

@Article{Henninger:1997:EAC,
  author =       "Scott Henninger",
  title =        "An evolutionary approach to constructing effective
                 software reuse repositories",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "2",
  pages =        "111--140",
  month =        apr,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-2/p111-henninger/p111-henninger.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-2/p111-henninger/",
  abstract =     "Repositories for software reuse are faced with two
                 interrelated problems: (1) acquiring the knowledge to
                 initially construct the repository and (2) modifying
                 the repository to meet the evolving and dynamic needs
                 of software development organizations. Current software
                 repository methods rely heavily on classification,
                 which exacerbates acquisition and evolution problems by
                 requiring costly classification and domain analysis
                 efforts before a repository can be used effectively,
                 This article outlines an approach that avoids these
                 problems by choosing a retrieval method that utilizes
                 minimal repository structure to effectively support the
                 process of finding software components. The approach is
                 demonstrated through a pair of proof-of-concept
                 prototypes: PEEL, a tool to semiautomatically identify
                 reusable components, and CodeFinder, a retrieval system
                 that compensates for the lack of explicit knowledge
                 structures through a spreading activation retrieval
                 process. CodeFinder also allows component
                 representations to be modified while users are
                 searching for information. This mechanism adapts to the
                 changing nature of the information in the repository
                 and incrementally improves the repository while people
                 use it. The combination of these techniques holds
                 potential for designing software repositories that
                 minimize up-front costs, effectively support the search
                 process, and evolve with an organization's changing
                 needs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "component repositories; information retrieval;
                 software reuse",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Software libraries}; Software
                 --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf User interfaces}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Query
                 formulation}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Retrieval models}",
}

@Article{Devanbu:1997:UDL,
  author =       "Premkumar Devanbu and Mark A. Jones",
  title =        "The use of description logics in {KBSE} systems",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "2",
  pages =        "141--172",
  month =        apr,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-2/p141-devalbu/p141-devalbu.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-2/p141-devanbu/",
  abstract =     "The increasing size and complexity of many software
                 systems demand a greater emphasis on capturing and
                 maintaining knowledge at many different levels within
                 the software development process. This knowledge
                 includes descriptions of the hardware and software
                 components and their behavior, external and internal
                 design specifications, and support for system testing.
                 The knowledge-based software engineering (KBSE)
                 research paradigm is concerned with systems that use
                 formally represented knowledge, with associated
                 inference procedures, to support the various
                 subactivities of software development. As they grow in
                 scale, KBSE systems must balance expressivity and
                 inferential power with the real demands of knowledge
                 base construction, maintenance, performance, and
                 comprehensibility. {\em Description logics\/} (DLs)
                 possess several features--a terminological orientation,
                 a formal semantics, and efficient reasoning
                 procedures--which offer an effective tradeoff of these
                 factors. We discuss three KBSE systems in which DLs
                 capture some of the requisite knowledge needed to
                 support design, coding, and testing activities. We then
                 survey some alternative approaches (to DLs) in KBSE
                 systems. We close with a discussion of the benefits of
                 DLs and ways to address some of their limitations.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "automated software engineering; knowledge bases;
                 logics; software development environments; testing;
                 tools",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2); Software --- Software Engineering
                 --- Coding Tools and Techniques (D.2.3); Software ---
                 Software Engineering --- Testing and Debugging (D.2.5);
                 Software --- Software Engineering --- Programming
                 Environments (D.2.6); Computing Methodologies ---
                 Artificial Intelligence --- Automatic Programming
                 (I.2.2); Computing Methodologies --- Artificial
                 Intelligence --- Knowledge Representation Formalisms
                 and Methods (I.2.4): {\bf Representations (procedural
                 and rule-based)}",
}

@Article{Rothermel:1997:SER,
  author =       "Gregg Rothermel and Mary Jean Harrold",
  title =        "A safe, efficient regression test selection
                 technique",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "2",
  pages =        "173--210",
  month =        apr,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-2/p173-rothermel/p173-rothermel.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-2/p173-rothermel/",
  abstract =     "Regression testing is an expensive but necessary
                 maintenance activity performed on modified software to
                 provide confidence that changes are correct and do not
                 adversely affect other portions of the software. A
                 regression test selection technique chooses, from an
                 existing test set, tests that are deemed necessary to
                 validate modified software. We present a new technique
                 for regression test selection. Our algorithms construct
                 control flow graphs for a procedure or program and its
                 modified version and use these graphs to select tests
                 that execute changed code from the original test suite.
                 We prove that, under certain conditions, the set of
                 tests our technique selects includes every test from
                 the original test suite that can expose faults in the
                 modified procedure or program. Under these conditions
                 our algorithms are {\em safe}. Moreover, although our
                 algorithms may select some tests that cannot expose
                 faults, they are at least as precise as other safe
                 regression test selection algorithms. Unlike many other
                 regression test selection algorithms, our algorithms
                 handle all language constructs and all types of program
                 modifications. We have implemented our algorithms;
                 initial empirical studies indicate that our technique
                 can significantly reduce the cost of regression testing
                 modified software.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Measurement; Performance; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "regression test selection; regression testing;
                 selective retest",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Computer-aided software
                 engineering (CASE)}; Software --- Software Engineering
                 --- Testing and Debugging (D.2.5); Software ---
                 Software Engineering --- Distribution, Maintenance, and
                 Enhancement (D.2.7): {\bf Corrections**}",
}

@Article{Allen:1997:FBA,
  author =       "Robert Allen and David Garlan",
  title =        "A formal basis for architectural connection",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "3",
  pages =        "213--249",
  month =        jul,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See errata \cite{Allen:1998:EFB}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-3/p213-allen/p213-allen.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-3/p213-allen/",
  abstract =     "As software systems become more complex, the overall
                 system structure--or software architecture--becomes a
                 central design problem. An important step toward an
                 engineering discipline of software is a formal basis
                  for describing and analyzing these designs. In this
                  article we present a formal approach to one aspect of
                 architectural design: the interactions among
                 components. The key idea is to define architectural
                 connectors as explicit semantic entities. These are
                 specified as a collection of protocols that
                 characterize each of the participant roles in an
                 interaction and how these roles interact. We illustrate
                 how this scheme can be used to define a variety of
                 common architectural connectors. We further provide a
                 formal semantics and show how this leads to a system in
                 which architectural compatibility can be checked in a
                 way analogous to type-checking in programming
                 languages.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal models; model-checking; module interconnection;
                 software analysis; WRIGHT",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Modules and interfaces};
                 Software --- Software Engineering --- Design**
                 (D.2.10): {\bf Representation**}; Theory of Computation
                 --- Logics and Meanings of Programs --- Specifying and
                 Verifying and Reasoning about Programs (F.3.1): {\bf
                 Specification techniques}; Theory of Computation ---
                 Mathematical Logic and Formal Languages --- Formal
                 Languages (F.4.3)",
}
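
%%% The compatibility check that the abstract likens to type-checking
%%% can be suggested with a much cruder model: represent a connector
%%% role as the set of event traces it permits and accept a component
%%% port when its traces are contained in that set. WRIGHT itself uses
%%% CSP processes and refinement; the trace-set model below is only an
%%% assumed simplification.

def compatible(port_traces, role_traces):
    """A port 'type-checks' against a role if its behavior is contained."""
    return port_traces <= role_traces

role_client = {('open', 'request', 'close'), ('open', 'close')}
port_ok = {('open', 'request', 'close')}
port_bad = {('open', 'request', 'request', 'close')}

print(compatible(port_ok, role_client))   # True
print(compatible(port_bad, role_client))  # False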

@Article{Roman:1997:MUR,
  author =       "Gruia-Catalin Roman and Peter J. McCann and Jerome Y.
                 Plun",
  title =        "Mobile {UNITY}: reasoning and specification in mobile
                 computing",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "3",
  pages =        "250--282",
  month =        jul,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-3/p250-roman/p250-roman.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-3/p250-roman/",
  abstract =     "Mobile computing represents a major point of departure
                 from the traditional distributed-computing paradigm.
                 The potentially very large number of independent
                 computing units, a decoupled computing style, frequent
                 disconnections, continuous position changes, and the
                 location-dependent nature of the behavior and
                 communication patterns present designers with
                 unprecedented challenges in the areas of modularity and
                 dependability. So far, the literature on mobile
                  computing is dominated by concerns having to do with
                 the development of protocols and services. This article
                 complements this perspective by considering the nature
                 of the underlying formal models that will enable us to
                 specify and reason about such computations. The basic
                 research goal is to characterize fundamental issues
                 facing mobile computing. We want to achieve this in a
                 manner analogous to the way concepts such as shared
                 variables and message passing help us understand
                 distributed computing. The pragmatic objective is to
                 develop techniques that facilitate the verification and
                 design of dependable mobile systems. Toward this goal
                 we employ the methods of UNITY. To focus on what is
                 essential, we center our study on {\em ad hoc
                 networks}, whose singular nature is bound to reveal the
                 ultimate impact of movement on the way one computes and
                 communicates in a mobile environment. To understand
                 interactions we start with the UNITY concepts of union
                 and superposition and consider direct generalizations
                 to transient interactions. The motivation behind the
                 transient nature of the interactions comes from the
                 fact that components can communicate with each other
                 only when they are within a certain range. The notation
                 we employ is a highly modular extension of the UNITY
                 programming notation. Reasoning about mobile
                 computations relies on extensions to the UNITY proof
                 logic.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages; Reliability; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal methods; mobile computing; mobile UNITY; shared
                 variables; synchronization; transient interactions;
                 weak consistency",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Correctness proofs};
                 Software --- Programming Languages --- Formal
                 Definitions and Theory (D.3.1): {\bf Semantics};
                 Software --- Programming Languages --- Language
                 Constructs and Features (D.3.3): {\bf Concurrent
                 programming structures}; Theory of Computation ---
                 Logics and Meanings of Programs --- Specifying and
                 Verifying and Reasoning about Programs (F.3.1); Theory
                 of Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2)",
}
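
%%% An informal analogy for the transient interactions described above:
%%% two mobile components keep a variable consistent only while they
%%% are within communication range, and the sharing dissolves when they
%%% move apart. Mobile UNITY expresses this with transient variable
%%% sharing and proof-logic extensions; the range constant, merge rule,
%%% and class below are invented for illustration.

RANGE = 10

class Component:
    def __init__(self, name, location, value):
        self.name, self.location, self.value = name, location, value

def step(a, b):
    """While in range, keep the transiently shared variable consistent."""
    if abs(a.location - b.location) <= RANGE:
        a.value = b.value = max(a.value, b.value)  # arbitrary merge rule

a = Component('a', location=0, value=1)
b = Component('b', location=5, value=7)
step(a, b)                   # in range: values reconciled
print(a.value, b.value)      # 7 7
b.location = 100
a.value = 42
step(a, b)                   # out of range: no interaction
print(a.value, b.value)      # 42 7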

@Article{Ambriola:1997:APC,
  author =       "Vincenzo Ambriola and Reidar Conradi and Alfonso
                 Fuggetta",
  title =        "Assessing process-centered software engineering
                 environments",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "3",
  pages =        "283--328",
  month =        jul,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-3/p283-ambriola/p283-ambriola.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-3/p283-ambriola/",
  abstract =     "Process-centered software engineering environments
                 (PSEEs) are the most recent generation of environments
                 supporting software development activities. They
                 exploit a representation of the process (called the
                 {\em process model\/}) that specifies how to carry out
                 software development activities, the roles and tasks of
                 software developers, and how to use and control
                 software development tools. A process model is
                 therefore a vehicle to better understand and
                 communicate the process. If it is expressed in a formal
                 notation, it can be used to support a variety of
                 activities such as process analysis, process
                 simulation, and process enactment. PSEEs provide
                 automatic support for these activities. They exploit
                 languages based on different paradigms, such as Petri
                 nets and rule-based systems. They include facilities to
                 edit and analyze process models. By enacting the
                 process model, a PSEE provides a variety of services,
                 such as assistance for software developers, automation
                 of routine tasks, invocation and control of software
                 development tools, and enforcement of mandatory rules
                 and practices. Several PSEEs have been developed, both
                 as research projects and as commercial products. The
                 initial deployment and exploitation of this technology
                 have made it possible to produce a significant amount
                 of experiences, comments, evaluations, and feedback. We
                 still lack, however, consistent and comprehensive
                 assessment methods that can be used to collect and
                 organize this information. This article aims at
                 contributing to the definition of such methods, by
                 providing a systematic comparison grid and by
                 accomplishing an initial evaluation of the state of the
                 art in the field. This evaluation takes into account
                 the systems that have been developed by the authors in
                 the past five years, as well as the main
                  characteristics of other well-known environments.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Human Factors; Languages; Management",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "CASE; enabling technology; process modeling languages;
                 process-centered software engineering environments;
                 software process",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Computing Milieux --- Management
                 of Computing and Information Systems --- Software
                 Management (K.6.3): {\bf Software development};
                 Computing Milieux --- Management of Computing and
                 Information Systems --- Software Management (K.6.3):
                 {\bf Software maintenance}; Software --- Software
                 Engineering --- Design Tools and Techniques (D.2.2):
                 {\bf Computer-aided software engineering (CASE)}",
}

@Article{Petrenko:1997:CRT,
  author =       "Alexandre Petrenko",
  title =        "Comments on {``A reduced test suite for protocol
                 conformance testing''}",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "3",
  pages =        "329--331",
  month =        jul,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See \cite{Bernhard:1994:RTS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-3/p329-petrenko/p329-petrenko.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-3/p329-petrenko/",
  abstract =     "A previous {\em ACM TOSEM\/} article of Ph. Bernhard
                 (``A Reduced Test Suite of Protocol Conformance
                 Testing,'' {\em ACM Transactions on Software
                 Engineering and Methodology}, Vol. 3, No. 3, July 1994,
                 pages 201--220) describes three new versions of the
                 so-called W-method for solving the protocol-testing
                 problem, i.e., solving the Mealy machine equivalence
                 problem. The author claims that these versions all have
                 the same fault detection capability as the original
                 W-method. In this correspondence we prove that the
                 results of that article are incorrect.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Reliability; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2): {\bf Protocol verification}; Software ---
                 Software Engineering --- Testing and Debugging (D.2.5):
                 {\bf Testing tools (e.g., data generators, coverage
                 testing)}",
}
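
%%% The correspondence above concerns the Mealy-machine equivalence
%%% problem that the W-method targets. As background, equivalence of
%%% two deterministic, complete Mealy machines can be decided by a
%%% breadth-first search of the product machine, as in the textbook
%%% construction sketched below; this is not the disputed algorithms
%%% themselves.

from collections import deque

def equivalent(m1, s1, m2, s2, inputs):
    """m1, m2 map (state, input) -> (next_state, output)."""
    seen, queue = set(), deque([(s1, s2)])
    while queue:
        p, q = queue.popleft()
        if (p, q) in seen:
            continue
        seen.add((p, q))
        for a in inputs:
            p2, out1 = m1[(p, a)]
            q2, out2 = m2[(q, a)]
            if out1 != out2:
                return False     # a distinguishing input sequence exists
            queue.append((p2, q2))
    return True

# Two single-state machines that echo their input are equivalent.
m = {('s', 0): ('s', 0), ('s', 1): ('s', 1)}
print(equivalent(m, 's', m, 's', [0, 1]))  # True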

@Article{Zaremski:1997:SMS,
  author =       "Amy Moormann Zaremski and Jeannette M. Wing",
  title =        "Specification matching of software components",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "4",
  pages =        "333--369",
  month =        oct,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-4/p333-zaremski/p333-zaremski.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-4/p333-zaremski/",
  abstract =     "Specification matching is a way to compare two
                 software components, based on descriptions of the
                  components' behaviors. In the context of software reuse
                 and library retrieval, it can help determine whether
                 one component can be substituted for another or how one
                 can be modified to fit the requirements of the other.
                 In the context of object-oriented programming, it can
                 help determine when one type is a behavioral subtype of
                 another. We use formal specifications to describe the
                 behavior of software components and, hence, to
                 determine whether two components match. We give precise
                 definitions of not just exact match, but, more
                 relevantly, various flavors of relaxed match. These
                 definitions capture the notions of generalization,
                 specialization, and substitutability of software
                 components. Since our formal specifications are pre-
                 and postconditions written as predicates in first-order
                 logic, we rely on theorem proving to determine match
                 and mismatch. We give examples from our implementation
                 of specification matching using the Larch Prover.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Documentation; Standardization; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2): {\bf Software libraries}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Modules, packages}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Pre- and post-conditions}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Specification techniques}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf
                 Retrieval models}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Selection process}",
}
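
%%% One flavor of relaxed match from the abstract is plug-in match: a
%%% library component L can replace a query Q if Q's precondition
%%% implies L's and L's postcondition implies Q's. The article works
%%% with first-order pre-/postconditions and the Larch Prover; the
%%% sketch below checks only propositional implications by truth-table
%%% enumeration, with invented predicates.

from itertools import product

def implies(p, q, arity):
    """Check p -> q over all Boolean assignments to `arity` variables."""
    return all(q(*vals) or not p(*vals)
               for vals in product([False, True], repeat=arity))

pre_q = lambda a, b: a            # query's precondition
post_q = lambda a, b: a or b      # query's postcondition
pre_l = lambda a, b: True         # library component accepts anything
post_l = lambda a, b: a           # and guarantees something stronger

plug_in = implies(pre_q, pre_l, 2) and implies(post_l, post_q, 2)
print(plug_in)  # True: L is substitutable for Q under plug-in match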

@Article{Gupta:1997:HSI,
  author =       "Rajiv Gupta and Mary Lou Soffa and John Howard",
  title =        "Hybrid slicing: integrating dynamic information with
                 static analysis",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "4",
  pages =        "370--397",
  month =        oct,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-4/p370-gupta/p370-gupta.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-4/p370-gupta/",
  abstract =     "Program slicing is an effective technique for
                 narrowing the focus of attention to the relevant parts
                 of a program during the debugging process. However,
                 imprecision is a problem in static slices, since they
                 are based on all possible executions that reach a given
                 program point rather than the specific execution under
                 which the program is being debugged. Dynamic slices,
                 based on the specific execution being debugged, are
                 precise but incur high run-time overhead due to the
                 tracing information that is collected during the
                 program's execution. We present a hybrid slicing
                 technique that integrates dynamic information from a
                 specific execution into a static slice analysis. The
                  {\em hybrid slice\/} produced is more precise than the
                  static slice and less costly than the dynamic slice.
                 The technique exploits dynamic information that is
                 readily available during debugging--namely, breakpoint
                 information and the dynamic call graph. This
                 information is integrated into a static slicing
                 analysis to more accurately estimate the potential
                 paths taken by the program. The breakpoints and
                 call/return points, used as reference points, divide
                 the execution path into intervals. By associating each
                 statement in the slice with an execution interval,
                 hybrid slicing provides information as to when a
                 statement was encountered during execution. Another
                 attractive feature of our approach is that it allows
                 the user to control the cost of hybrid slicing by
                 limiting the amount of dynamic information used in
                 computing the slice. We implemented the hybrid slicing
                 technique to demonstrate the feasibility of our
                 approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Experimentation; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "breakpoint; dynamic call graph; dynamic slice; hybrid
                 slice; static slice",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5)",
}
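
%%% A rough rendering of the hybrid idea: start from a static slice and
%%% prune it with dynamic facts that debugging makes available, here
%%% just the dynamic call graph (functions actually entered in the
%%% run). The paper also exploits breakpoints and execution intervals,
%%% which this sketch omits; the data structures are assumed for
%%% illustration.

def hybrid_slice(static_slice, stmt_to_function, called_functions):
    """Keep only statements inside functions the debugged run entered."""
    return {s for s in static_slice
            if stmt_to_function[s] in called_functions}

static_slice = {'s1', 's2', 's3'}
stmt_to_function = {'s1': 'main', 's2': 'helper', 's3': 'unused'}
called = {'main', 'helper'}            # from the dynamic call graph
print(sorted(hybrid_slice(static_slice, stmt_to_function, called)))
# ['s1', 's2']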

@Article{Zeller:1997:UVT,
  author =       "Andreas Zeller and Gregor Snelting",
  title =        "Unified versioning through feature logic",
  journal =      j-TOSEM,
  volume =       "6",
  number =       "4",
  pages =        "398--441",
  month =        oct,
  year =         "1997",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1997-6-4/p398-zeller/p398-zeller.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1997-6-4/p398-zeller/",
  abstract =     "Software configuration management (SCM) suffers from
                  tight coupling between SCM versioning models and the
                 imposed SCM processes. In order to adapt SCM tools to
                 SCM processes, rather than vice versa, we propose a
                 unified versioning model, the {\em version set model}.
                 Version sets denote versions, components, and
                 configurations by {\em feature terms}, that is, Boolean
                 terms over ({\em feature : value\/})-attributions.
                 Through {\em feature logic}, we deduce consistency of
                 abstract configurations as well as features of derived
                 components and describe how features propagate in the
                 SCM process; using {\em feature implications}, we
                 integrate change-oriented and version-oriented SCM
                 models. We have implemented the version set model in an
                 SCM system called ICE, for {\em Incremental
                 Configuration Environment}. ICE is based on a {\em
                 featured file system (FFS)}, where version sets are
                 accessed as virtual files and directories. Using the
                 well-known C preprocessor (CPP) representation, users
                 can view and edit multiple versions simultaneously,
                 while only the differences between versions are stored.
                 It turns out that all major SCM models can be realized
                 and integrated efficiently on top of the FFS,
                 demonstrating the flexible and unifying nature of the
                 version set model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Management; Standardization; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "feature logic; version sets",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Software --- Software Engineering
                 --- Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Version control}; Software --- Software
                 Engineering --- Management (D.2.9): {\bf Software
                 configuration management}; Software --- Operating
                 Systems --- File Systems Management (D.4.3); Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3); Computing Methodologies
                 --- Artificial Intelligence --- Knowledge
                 Representation Formalisms and Methods (I.2.4); Software
                 --- Software Engineering --- Management (D.2.9): {\bf
                 Programming teams}",
}
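
%%% A toy rendering of version sets: describe each version by a feature
%%% term, modeled below as a dict of feature:value attributions, and
%%% call a configuration consistent when its terms unify without giving
%%% one feature two values. Full feature logic adds negation,
%%% disjunction, and implications, all ignored here.

def unify(*terms):
    """Return the combined feature term, or None on a feature clash."""
    merged = {}
    for term in terms:
        for feature, value in term.items():
            if merged.get(feature, value) != value:
                return None          # inconsistent configuration
            merged[feature] = value
    return merged

editor_unix = {'os': 'unix', 'screen': 'x11'}
kernel_dbg = {'os': 'unix', 'debug': True}
dos_patch = {'os': 'dos'}

print(unify(editor_unix, kernel_dbg))  # consistent: merged term
print(unify(editor_unix, dos_patch))   # None: 'os' clashes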

@Article{Doppke:1998:SPM,
  author =       "John C. Doppke and Dennis Heimbigner and Alexander L.
                 Wolf",
  title =        "Software process modeling and execution within virtual
                 environments",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "1",
  pages =        "1--40",
  month =        jan,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-1/p1-doppke/p1-doppke.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-1/p1-doppke/",
  abstract =     "In the past, multiuser virtual environments have been
                 developed as venues for entertainment and social
                 interaction. Recent research focuses instead on their
                 utility in carrying out work in the real world. This
                 research has identified the importance of a mapping
                 between the real and the virtual that permits the
                 representation of real tasks in the virtual
                 environment. We investigate the use of virtual
                 environments--in particular, MUDs (Multi-User
                 Dimensions)--in the domain of software process. In so
                 doing, we define a mapping, or {\em metaphor}, that
                 permits the representation of software processes within
                 a MUD. The system resulting from this mapping, called
                 {\em Promo}, permits the modeling and execution of
                 software processes by geographically dispersed
                 agents.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Management; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "MOO; MUD; PROMO; software process; tools; virtual
                 environments",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Computing Milieux --- Management
                 of Computing and Information Systems --- Software
                 Management (K.6.3); Information Systems --- Information
                 Interfaces and Presentation --- Multimedia Information
                 Systems (H.5.1): {\bf Artificial, augmented, and
                 virtual realities}; Software --- Software Engineering
                 --- Management (D.2.9): {\bf Software process models
                 (e.g., CMM, ISO, PSP)}",
}

@Article{Porter:1998:USV,
  author =       "Adam Porter and Harvey Siy and Audris Mockus and
                 Lawrence Votta",
  title =        "Understanding the sources of variation in software
                 inspections",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "1",
  pages =        "41--79",
  month =        jan,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-1/p41-porter/p41-porter.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-1/p41-porter/",
  abstract =     "In a previous experiment, we determined how various
                 changes in three structural elements of the software
                 inspection process (team size and the number and
                 sequencing of sessions) altered effectiveness and
                 interval. Our results showed that such changes did not
                 significantly influence the defect detection rate, but
                 that certain combinations of changes dramatically
                 increased the inspection interval. We also observed a
                 large amount of unexplained variance in the data,
                 indicating that other factors must be affecting
                 inspection performance. The nature and extent of these
                 other factors now have to be determined to ensure that
                 they had not biased our earlier results. Also,
                 identifying these other factors might suggest
                 additional ways to improve the efficiency of
                 inspections. Acting on the hypothesis that the
                 ``inputs'' into the inspection process (reviewers,
                 authors, and code units) were significant sources of
                 variation, we modeled their effects on inspection
                 performance. We found that they were responsible for
                  much more variation in defect detection than was
                 process structure. This leads us to conclude that
                 better defect detection techniques, not better process
                 structures, are the key to improving inspection
                 effectiveness. The combined effects of process inputs
                 and process structure on the inspection interval
                 accounted for only a small percentage of the variance
                 in inspection interval. Therefore, there must be other
                 factors which need to be identified.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Experimentation; Measurement; Performance;
                 Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "empirical studies; software inspection; software
                 process; statistical models",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Code inspections and
                 walk-throughs}; Software --- Software Engineering ---
                 Management (D.2.9): {\bf Software process models (e.g.,
                 CMM, ISO, PSP)}",
}
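
%%% The kind of analysis the abstract reports can be suggested with a
%%% small variance-decomposition sketch: compare how much of the spread
%%% in defect detection two different groupings explain (eta-squared).
%%% All numbers below are invented; the study's actual statistical
%%% models are far richer.

from statistics import mean

def eta_squared(values, groups):
    """Fraction of total variance explained by a grouping."""
    overall = mean(values)
    total = sum((v - overall) ** 2 for v in values)
    by_group = {}
    for v, g in zip(values, groups):
        by_group.setdefault(g, []).append(v)
    between = sum(len(vs) * (mean(vs) - overall) ** 2
                  for vs in by_group.values())
    return between / total

rates = [10, 12, 30, 28, 11, 29]                  # defect detection rates
reviewer = ['r1', 'r1', 'r2', 'r2', 'r1', 'r2']   # a process input
structure = ['one-session', 'two-session'] * 3    # process structure
print(round(eta_squared(rates, reviewer), 2))     # 0.99: inputs dominate
print(round(eta_squared(rates, structure), 2))    # 0.11: structure does not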

@Article{Baresi:1998:TFS,
  author =       "Luciano Baresi and Mauro Pezz{\`e}",
  title =        "Toward formalizing structured analysis",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "1",
  pages =        "80--107",
  month =        jan,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-1/p80-baresi/p80-baresi.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-1/p80-baresi/",
  abstract =     "Real-time extensions to structured analysis (SA/RT)
                 are popular in industrial practice. Despite the large
                 industrial experience and the attempts to formalize the
                 various ``dialects,'' SA/RT notations are still
                 imprecise and ambiguous. This article tries to identify
                 the semantic problems of the requirements definition
                 notation defined by Hatley and Pirbhai, one of the
                 popular SA/RT ``dialects,'' and discusses possible
                 solutions. As opposed to other articles that give their
                 own interpretation, this article does not propose a
                 specific semantics for the notation. This article
                 identifies imprecisions, i.e., missing or partial
                 information about features of the notation; it
                 discusses ambiguities, i.e., elements of the definition
                 that allow at least two different (``reasonable'')
                 interpretations of features of the notation; and it
                 lists extensions, i.e., features not belonging to the
                 notation, but required by many industrial users and
                 often supported by CASE tools. This article contributes
                 by clarifying whether specific interpretations can be
                 given unique semantics or retain ambiguities of the
                 original definition. The article allows for the
                 evaluation of formal definitions by indicating
                 alternatives and consequences of the specific
                 choices.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Documentation; Measurement; Performance;
                 Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Hatley and Pirbhai's requirements definition notation;
                 informal versus formal specifications; structured
                 analysis/real-time",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Coding Tools and Techniques
                 (D.2.3): {\bf Structured programming}",
}

@Article{Bowdidge:1998:SRD,
  author =       "Robert W. Bowdidge and William G. Griswold",
  title =        "Supporting the restructuring of data abstractions
                 through manipulation of a program visualization",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "2",
  pages =        "109--157",
  month =        apr,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-2/p109-bowdidge/p109-bowdidge.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-2/p109-bowdidge/",
  abstract =     "With a meaning-preserving restructuring tool, a
                 software engineer can change a program's structure to
                 ease future modifications. However, deciding how to
                 restructure the program requires a global understanding
                 of the program's structure, which cannot be derived
                 easily by directly inspecting the source code. We
                 describe a manipulable program visualization--the {\em
                 star diagram\/} --that supports the restructuring task
                 of encapsulating a global data structure. The star
                 diagram graphically displays information pertinent to
                 encapsulation, and direct manipulation of the diagram
                 causes the underlying program to be restructured. The
                 visualization compactly presents all statements in the
                 program that use the given global data structure,
                 helping the programmer to choose the functions that
                 completely encapsulate it. Additionally, the
                 visualization elides code unrelated to the data
                 structure and to the task and collapses similar
                 expressions to help the programmer identify frequently
                 occurring code fragments and manipulate them together.
                 The visualization is mapped directly to the program
                 text, so manipulation of the visualization also
                 restructures the program. We present the star diagram
                 concept and describe an implementation of the star
                 diagram built upon a meaning-preserving restructuring
                 tool for Scheme. We also describe our creation of star
                 diagram generators for C programs, and we test the
                 scalability of the star diagram using large C and MUMPS
                 programs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "meaning-preserving restructuring; semi-automated
                 restructuring; software visualization; star diagram;
                 tool-supported restructuring",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Modules and interfaces};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf User interfaces}; Software ---
                 Software Engineering --- Distribution, Maintenance, and
                 Enhancement (D.2.7): {\bf Restructuring, reverse
                 engineering, and reengineering}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Abstract data types}",
}
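
%%% The core star-diagram computation can be hinted at in a few lines:
%%% collect every statement that mentions a given global variable and
%%% collapse syntactically similar uses so recurring fragments stand
%%% out. Real star diagrams are trees rooted at the variable and drive
%%% actual restructuring; the pattern abstraction below is a crude
%%% stand-in.

import re
from collections import Counter

def star_groups(statements, variable):
    """Group uses of `variable` by a rough expression pattern."""
    uses = [s for s in statements if variable in s]
    patterns = (re.sub(r'\b[a-z]\b', '_', s.replace(variable, '<var>'))
                for s in uses)
    return Counter(patterns)

program = ['push(stack, x)', 'push(stack, y)', 'top = stack[-1]',
           'print(header)']
for pattern, count in star_groups(program, 'stack').items():
    print(count, pattern)
# 2 push(<var>, _)
# 1 top = <var>[-1]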

@Article{Murphy:1998:ESS,
  author =       "Gail C. Murphy and David Notkin and William G.
                 Griswold and Erica S. Lan",
  title =        "An empirical study of static call graph extractors",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "2",
  pages =        "158--191",
  month =        apr,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-2/p158-murphy/p158-murphy.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-2/p158-murphy/",
  abstract =     "Informally, a call graph represents calls between
                 entities in a given program. The call graphs that
                 compilers compute to determine the applicability of an
                 optimization must typically be conservative: a call may
                 be omitted only if it can never occur in any execution
                 of the program. Numerous software engineering tools
                 also extract call graphs with the expectation that they
                 will help software engineers increase their
                 understanding of a program. The requirements placed on
                 software engineering tools that compute call graphs are
                 typically more relaxed than for compilers. For example,
                 some false negatives--calls that can in fact take place
                 in some execution of the program, but which are omitted
                 from the call graph--may be acceptable, depending on
                 the understanding task at hand. In this article, we
                 empirically show a consequence of this spectrum of
                 requirements by comparing the C call graphs extracted
                 from three software systems (mapmaker, mosaic, and gcc)
                 by nine tools (cflow, cawk, CIA, Field, GCT, Imagix,
                 LSME, Mawk, and Rigiparse). A quantitative analysis of
                 the call graphs extracted for each system shows
                 considerable variation, a result that is
                 counterintuitive to many experienced software
                 engineers. A qualitative analysis of these results
                 reveals a number of reasons for this variation:
                 differing treatments of macros, function pointers,
                 input formats, etc. The fundamental problem is not that
                 variances among the graphs extracted by different tools
                 exist, but that software engineers have little sense of
                 the dimensions of approximation in any particular call
                 graph. In this article, we describe and discuss the
                 study, sketch a design space for static call graph
                 extractors, and discuss the impact of our study on
                 practitioners, tool developers, and researchers.
                 Although this article considers only one kind of
                 information, call graphs, many of the observations also
                 apply to static extractors of other kinds of
                 information, such as inheritance structures, file
                 dependences, and references to global variables.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "call graphs; design space; empirical study; software
                 system analysis; static analysis",
  subject =      "Software --- Programming Languages --- Processors
                 (D.3.4)",
}
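
%%% The comparison in this study reduces, per program, to set
%%% arithmetic over extracted call edges. A sketch of that arithmetic,
%%% with made-up edge sets standing in for two extractors' output
%%% (without ground truth, tools can only be compared to each other):

def diff_call_graphs(g1, g2):
    """Report disagreement between two call graphs given as edge sets."""
    return {'only_in_1': g1 - g2,
            'only_in_2': g2 - g1,
            'agreement': len(g1 & g2) / len(g1 | g2)}

tool1_edges = {('main', 'parse'), ('parse', 'error'), ('main', 'init')}
tool2_edges = {('main', 'parse'), ('main', 'init'), ('init', 'signal')}

report = diff_call_graphs(tool1_edges, tool2_edges)
print(report['agreement'])  # 0.5
print(report['only_in_1'])  # {('parse', 'error')}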

@Article{Hunt:1998:DAE,
  author =       "James J. Hunt and Kiem-Phong Vo and Walter F. Tichy",
  title =        "Delta algorithms an empirical analysis",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "2",
  pages =        "192--214",
  month =        apr,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See addendum \cite{Hunt:1998:ADA}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-2/p192-hunt/p192-hunt.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-2/p192-hunt/",
  abstract =     "Delta algorithms compress data by encoding one file in
                 terms of another. This type of compression is useful in
                  a number of situations: storing multiple versions of
                 data, displaying differences, merging changes,
                 distributing updates, storing backups, transmitting
                 video sequences, and others. This article studies the
                 performance parameters of several delta algorithms,
                 using a benchmark of over 1,300 pairs of files taken
                 from two successive releases of GNU software. Results
                 indicate that modern delta compression algorithms based
                 on Ziv--Lempel techniques significantly outperform {\em
                 diff}, a popular but older delta compressor, in terms
                 of compression ratio. The modern compressors also
                 correlate better with the actual difference between
                 files without sacrificing performance.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Experimentation; Measurement;
                 Performance",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "benchmark; delta encoding; differencing",
  subject =      "Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7): {\bf Version
                 control}; Software --- Software Engineering --- Metrics
                 (D.2.8): {\bf Performance measures}; Data --- Coding
                 and Information Theory (E.4): {\bf Data compaction and
                 compression}; Data --- Files (E.5): {\bf
                 Backup/recovery}",
}
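
%%% The benchmark's central quantity, a delta "compression ratio", is
%%% easy to reproduce for one file pair: encode the new version as a
%%% diff against the old and compare sizes. difflib's text diff stands
%%% in for the Ziv--Lempel-based delta encoders the article actually
%%% measures, so the numbers are only indicative.

import difflib
import zlib

old = ['line %d\n' % i for i in range(1000)]
new = ['line %d\n' % (i if i != 500 else -1) for i in range(1000)]

delta = ''.join(difflib.unified_diff(old, new)).encode()
raw = ''.join(new).encode()

print(len(delta) / len(raw))                 # delta "compression ratio"
print(len(zlib.compress(raw)) / len(raw))    # plain compression, for scale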

@Article{Cook:1998:DMS,
  author =       "Jonathan E. Cook and Alexander L. Wolf",
  title =        "Discovering models of software processes from
                 event-based data",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "3",
  pages =        "215--249",
  month =        jul,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-3/p215-cook/p215-cook.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-3/p215-cook/",
  abstract =     "Many software process methods and tools presuppose the
                 existence of a formal model of a process.
                 Unfortunately, developing a formal model for an
                 on-going, complex process can be difficult, costly, and
                 error prone. This presents a practical barrier to the
                 adoption of process technologies, which would be
                 lowered by automated assistance in creating formal
                 models. To this end, we have developed a data analysis
                 technique that we term {\em process discovery.\/} Under
                 this technique, data describing process events are
                 first captured from an on-going process and then used
                 to generate a formal model of the behavior of that
                 process. In this article we describe a Markov method
                 that we developed specifically for process discovery,
                 as well as describe two additional methods that we
                 adopted from other domains and augmented for our
                 purposes. The three methods range from the purely
                 algorithmic to the purely statistical. We compare the
                 methods and discuss their application in an industrial
                 case study.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Management",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Balboa; process discovery; software process; tools",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Computing Milieux --- Management
                 of Computing and Information Systems --- Software
                 Management (K.6.3): {\bf Software development};
                 Computing Milieux --- Management of Computing and
                 Information Systems --- Software Management (K.6.3):
                 {\bf Software maintenance}",
}
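
%%% A sketch of the statistical end of process discovery: estimate a
%%% first-order Markov model from recorded event sequences, then keep
%%% only transitions frequent enough to count as process structure. The
%%% article's Markov method (implemented in Balboa) is considerably
%%% more elaborate; the event log and threshold below are invented.

from collections import Counter

def discover(sequences, min_prob=0.4):
    """Return transition probabilities at or above the threshold."""
    counts, totals = Counter(), Counter()
    for seq in sequences:
        for a, b in zip(seq, seq[1:]):
            counts[(a, b)] += 1
            totals[a] += 1
    return {(a, b): c / totals[a]
            for (a, b), c in counts.items() if c / totals[a] >= min_prob}

logs = [['edit', 'review', 'checkin'],
        ['edit', 'review', 'rework', 'review', 'checkin'],
        ['edit', 'checkin']]
for edge, p in sorted(discover(logs).items()):
    print(edge, round(p, 2))
# ('edit', 'review') 0.67; ('review', 'checkin') 0.67; ('rework', 'review') 1.0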

@Article{Chen:1998:BWI,
  author =       "Huo Yan Chen and T. H. Tse and F. T. Chan and T. Y.
                 Chen",
  title =        "In black and white: an integrated approach to
                 class-level testing of object-oriented programs",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "3",
  pages =        "250--295",
  month =        jul,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-3/p250-chen/p250-chen.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-3/p250-chen/",
  abstract =     "Because of the growing importance of object-oriented
                 programming, a number of testing strategies have been
                 proposed. They are based either on pure black-box or
                 white-box techniques. We propose in this article a
                 methodology to integrate the black- and white-box
                 techniques. The black-box technique is used to select
                 test cases. The white-box technique is mainly applied
                 to determine whether two objects resulting from the
                  program execution of a test case are observationally
                 equivalent. It is also used to select test cases in
                 some situations. We define the concept of a fundamental
                 pair as a pair of equivalent terms that are formed by
                 replacing all the variables on both sides of an axiom
                 by normal forms. We prove that an implementation is
                 consistent with respect to all equivalent terms if and
                 only if it is consistent with respect to all
                 fundamental pairs. In other words, the testing coverage
                 of fundamental pairs is as good as that of all possible
                 term rewritings, and hence we need only concentrate on
                 the testing of fundamental pairs. Our strategy is based
                 on mathematical theorems. According to the strategy, we
                 propose an algorithm for selecting a finite set of
                 fundamental pairs as test cases. Given a pair of
                 equivalent terms as a test case, we should then
                 determine whether the objects that result from
                 executing the implemented program are observationally
                 equivalent. We prove, however, that the observational
                 equivalence of objects cannot be determined using a
                 finite set of observable contexts (which are operation
                 sequences ending with an observer function) derived
                 from any black-box technique. Hence we supplement our
                 approach with a ``relevant observable context''
                 technique, which is a heuristic white-box technique to
                 select a relevant finite subset of the set of
                 observable contexts for determining the observational
                 equivalence. The relevant observable contexts are
                 constructed from a data member relevance graph (DRG),
                 which is an abstraction of the given implementation for
                  a given specification. A semiautomatic tool has been
                 developed to support this technique.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Languages; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "abstract data types; algebraic specification;
                 object-oriented programming; observational equivalence;
                 software-testing methodologies",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Software --- Programming Languages
                 --- Language Classifications (D.3.2): {\bf
                 Object-oriented languages}",
}
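
%%% The mechanics of axiom-based class testing can be shown on a stack:
%%% instantiate an axiom such as pop(push(s, x)) = s with concrete
%%% normal-form terms (in the spirit of fundamental pairs) and compare
%%% the two resulting objects through a finite observer set. The
%%% article proves finite observer sets are insufficient in general,
%%% which is what motivates its "relevant observable context"
%%% heuristic; this sketch only illustrates the check itself.

class Stack:
    def __init__(self, items=()):
        self.items = tuple(items)
    def push(self, x):
        return Stack(self.items + (x,))
    def pop(self):
        return Stack(self.items[:-1])
    def top(self):
        return self.items[-1] if self.items else None
    def size(self):
        return len(self.items)

OBSERVERS = [Stack.top, Stack.size]

def observationally_equal(s1, s2):
    """Equality as seen through the (finite) observer set only."""
    return all(obs(s1) == obs(s2) for obs in OBSERVERS)

# Ground instances of the axiom pop(push(s, x)) = s:
for items, x in [((), 1), ((1, 2), 3)]:
    s = Stack(items)
    assert observationally_equal(s.push(x).pop(), s)
print('axiom instances passed')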

@Article{Crow:1998:FSS,
  author =       "Judith Crow and Ben {Di Vito}",
  title =        "Formalizing space shuttle software requirements: four
                 case studies",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "3",
  pages =        "296--332",
  month =        jul,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-3/p296-crow/p296-crow.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-3/p296-crow/",
  abstract =     "This article describes four case studies in which
                 requirements for new flight software subsystems on
                 NASA's Space Shuttle were analyzed using mechanically
                 supported formal methods. Three of the studies used
                 standard formal specification and verification
                 techniques, and the fourth used state exploration.
                  These applications illustrate two theses: (1) formal
                 methods complement conventional requirements analysis
                 processes effectively and (2) formal methods confer
                 benefits even when only selectively adopted and
                 applied. The studies also illustrate the interplay of
                 application maturity level and formal methods strategy,
                 especially in areas such as technology transfer, legacy
                 applications, and rapid formalization, and they raise
                 interesting issues in problem domain modeling and in
                 tailoring formal techniques to applications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "flight software; formal methods; requirements
                 analysis; space shuttle; state exploration; theorem
                 proving",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1): {\bf Tools}; Theory of Computation --- Logics
                 and Meanings of Programs --- Specifying and Verifying
                 and Reasoning about Programs (F.3.1): {\bf Logics of
                 programs}; Theory of Computation --- Logics and
                 Meanings of Programs --- Specifying and Verifying and
                 Reasoning about Programs (F.3.1): {\bf Mechanical
                 verification}; Theory of Computation --- Logics and
                 Meanings of Programs --- Specifying and Verifying and
                 Reasoning about Programs (F.3.1): {\bf Specification
                 techniques}",
}

@Article{Allen:1998:EFB,
  author =       "Robert Allen and David Garlan",
  title =        "Errata: {``A formal basis for architectural
                 connection''}",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "3",
  pages =        "333--334",
  month =        jul,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See \cite{Allen:1997:FBA}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-3/p333-allen/p333-allen.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-3/p333-allen/",
  abstract =     "We present corrections to a previously published
                  article which appeared in {\em ACM Transactions on
                  Software Engineering and Methodology\/} 6, 3 (July
                  1997), pp. 213--249.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal models; model-checking; module interconnection;
                 software analysis; WRIGHT",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Modules and interfaces};
                 Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Specification techniques}; Theory of
                 Computation --- Mathematical Logic and Formal Languages
                 --- Formal Languages (F.4.3)",
}

@Article{Hunter:1998:MIS,
  author =       "Anthony Hunter and Bashar Nuseibeh",
  title =        "Managing inconsistent specifications: reasoning,
                 analysis, and action",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "4",
  pages =        "335--367",
  month =        oct,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-4/p335-hunter/p335-hunter.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-4/p335-hunter/",
  abstract =     "In previous work, we advocated continued development
                 of specifications in the presence of inconsistency. To
                 support this, we used classical logic to represent
                 partial specifications and to identify inconsistencies
                 between them. We now present an adaptation of classical
                 logic, which we term quasi-classical (QC) logic, that
                 allows continued reasoning in the presence of
                 inconsistency. The adaptation is a weakening of
                 classical logic that prohibits all trivial derivations,
                 but still allows all resolvants of the assumptions to
                 be derived. Furthermore, the connectives behave in a
                 classical manner. We then present a development called
                 labeled QC logic that records and tracks assumptions
                 used in reasoning. This facilitates a logical analysis
                  of inconsistent information. We discuss the
                 application of labeled QC logic in the analysis of
                 multiperspective specifications. Such specifications
                 are developed by multiple participants who hold
                 overlapping, often inconsistent, views of the systems
                 they are developing.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "managing inconsistency; paraconsistent logics;
                 requirements specification; viewpoints",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Computer-aided software
                 engineering (CASE)}; Software --- Software Engineering
                 --- Software/Program Verification (D.2.4): {\bf
                 Validation}; Software --- Software Engineering ---
                 Testing and Debugging (D.2.5): {\bf Error handling and
                 recovery}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Restructuring, reverse engineering, and
                 reengineering}; Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Mathematical Logic
                 (F.4.1): {\bf Proof theory}; Software --- Software
                 Engineering --- General (D.2.0); Software --- Software
                 Engineering --- Design Tools and Techniques (D.2.2)",
}

@Article{Jaccheri:1998:ESP,
  author =       "Maria Letizia Jaccheri and Gian Pietro Picco and
                 Patricia Lago",
  title =        "Eliciting software process models with the {E3}
                 language",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "4",
  pages =        "368--410",
  month =        oct,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-4/p368-jaccheri/p368-jaccheri.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-4/p368-jaccheri/",
  abstract =     "Software processes are complex entities that demand
                  careful understanding and improvement as they
                 determine the quality of the resulting product. A
                 necessary step toward the improvement of an
                 organization's process is a clear description of the
                 entities involved and of their mutual relationships.
                 Process model {\em elicitation\/} aims at constructing
                  this description in the form of a software process
                 model. The model is constructed by gathering, from
                 several sources, process information which is often
                 incomplete, inconsistent, and ambiguous. A process
                 modeling language can be used to represent the model
                 being elicited. However, elicitation requires process
                 models to be understandable and well structured. These
                 requirements are often not satisfied by available
                 process modeling languages because of their bias toward
                 process enaction rather than process description. This
                 article presents a process modeling language and a
                 support tool which are conceived especially for process
                  model elicitation. The {\em E3\/} language is an
                  object-oriented modeling language with a graphical
                  notation. In {\em E3\/}, associations are a means to
                  express constraints and facilitate reuse. The {\em
                  E3\/} p-draw tool supports the creation and management
                  of {\em E3\/} models and provides a view mechanism that
                 enables inspection of models according to different
                 perspectives.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Documentation; Languages; Management",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "associations; process model elicitation; software
                 process modeling",
  subject =      "Software --- Programming Techniques ---
                 Object-oriented Programming (D.1.5); Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1); Software --- Software Engineering --- Design
                 Tools and Techniques (D.2.2): {\bf Computer-aided
                 software engineering (CASE)}; Software --- Software
                 Engineering --- Management (D.2.9): {\bf Software
                 configuration management}; Computing Milieux ---
                 Management of Computing and Information Systems ---
                 Software Management (K.6.3): {\bf Software
                 development}; Computing Milieux --- Management of
                 Computing and Information Systems --- Software
                 Management (K.6.3): {\bf Software maintenance}",
}

@Article{Fuggetta:1998:AGI,
  author =       "Alfonso Fuggetta and Luigi Lavazza and Sandro Morasca
                 and Stefano Cinti and Giandomenico Oldano and Elena
                 Orazi",
  title =        "Applying {GQM} in an industrial software factory",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "4",
  pages =        "411--448",
  month =        oct,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-4/p411-fuggetta/p411-fuggetta.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-4/p411-fuggetta/",
  abstract =     "Goal/Question/Metric (GQM) is a paradigm for the
                 systematic definition, establishment, and exploitation
                 of measurement programs supporting the quantitative
                 evaluation of software processes and products. Although
                 GQM is a quite well-known method, detailed guidelines
                 for establishing a GQM program in an industrial
                 environment are still limited. Also, there are few
                 reported experiences on the application of GQM to
                 industrial cases. Finally, the technological support
                 for GQM is still inadequate. This article describes the
                 experience we have gained in applying GQM at Digital
                 Laboratories in Italy. The procedures, experiences, and
                 technology that have been employed in this study are
                 largely reusable by other industrial organizations
                 willing to introduce a GQM-based measurement program in
                 their development environments.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Management; Measurement",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "GQM; measurement cost; measurement process",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Computer-aided software
                 engineering (CASE)}; Software --- Software Engineering
                 --- Metrics (D.2.8): {\bf Performance measures};
                 Software --- Software Engineering --- Management
                 (D.2.9): {\bf Productivity}; Software --- Software
                 Engineering --- Management (D.2.9): {\bf Software
                 quality assurance (SQA)}",
}

@Article{Hunt:1998:ADA,
  author =       "James J. Hunt and Walter F. Tichy",
  title =        "Addendum to {``Delta algorithms: an empirical
                 analysis''}",
  journal =      j-TOSEM,
  volume =       "7",
  number =       "4",
  pages =        "449--449",
  month =        oct,
  year =         "1998",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  note =         "See \cite{Hunt:1998:DAE}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1998-7-4/p449-hunt/p449-hunt.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1998-7-4/p449-hunt/",
  abstract =     "The authors supply machine configurations for
                 experiments reported in ``Delta Algorithms: An
                 Empirical Analysis,'' by Hunt et al. ({\em ACM Trans.
                 Softw. Eng. Methodol.\/} 7, 2 (Apr. 1998), pp.
                  192--214).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "benchmark; delta encoding; differencing",
  subject =      "Software --- Software Engineering --- Distribution,
                 Maintenance, and Enhancement (D.2.7): {\bf Version
                 control}; Software --- Software Engineering --- Metrics
                 (D.2.8): {\bf Performance measures}; Data --- Coding
                 and Information Theory (E.4): {\bf Data compaction and
                 compression}; Data --- Files (E.5): {\bf
                 Backup/recovery}",
}

@Article{Ostroff:1999:CRD,
  author =       "Jonathan S. Ostroff",
  title =        "Composition and refinement of discrete real-time
                 systems",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "1",
  pages =        "1--48",
  month =        jan,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-1/p1-ostroff/p1-ostroff.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-1/p1-ostroff/",
  abstract =     "Reactive systems exhibit ongoing, possibly
                 nonterminating, interaction with the environment.
                 Real-time systems are reactive systems that must
                 satisfy quantitative timing constraints. This paper
                 presents a structured compositional design method for
                 discrete real-time systems that can be used to combat
                 the combinatorial explosion of states in the
                 verification of large systems. A {\em composition
                 rule\/} describes how the correctness of the system can
                 be determined from the correctness of its modules,
                 without knowledge of their internal structure. The
                 advantage of compositional verification is clear. Each
                 module is both simpler and smaller than the system
                 itself. Composition requires the use of both
                 model-checking and deductive techniques. A {\em
                 refinement rule\/} guarantees that specifications of
                 high-level modules are preserved by their
                 implementations. The {\em StateTime\/} toolset is used
                 to automate parts of compositional designs using a
                 combination of model-checking and simulation. The
                 design method is illustrated using a reactor shutdown
                 system that cannot be verified using the StateTime
                 toolset (due to the combinatorial explosion of states)
                 without compositional reasoning. The reactor example
                 also illustrates the use of the refinement rule.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "abstraction; model-checking; modules; refinement;
                 state explosion; temporal logic; timed logic",
  subject =      "Software --- Programming Techniques --- Concurrent
                 Programming (D.1.3); Software --- Software Engineering
                 --- Design** (D.2.10); Software --- Software
                 Engineering --- Requirements/Specifications (D.2.1):
                 {\bf Methodologies (e.g., object-oriented,
                 structured)}; Software --- Software Engineering ---
                 Software/Program Verification (D.2.4): {\bf Model
                 checking}; Software --- Software Engineering --- Design
                 Tools and Techniques (D.2.2): {\bf Modules and
                 interfaces}; Software --- Software Engineering ---
                 Design Tools and Techniques (D.2.2); Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1): {\bf Tools}; Software --- Software Engineering
                 --- Design Tools and Techniques (D.2.2): {\bf State
                 diagrams}; Software --- Software Engineering ---
                 Software/Program Verification (D.2.4)",
}

@Article{Cheung:1999:CSP,
  author =       "Shing Chi Cheung and Jeff Kramer",
  title =        "Checking safety properties using compositional
                 reachability analysis",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "1",
  pages =        "49--78",
  month =        jan,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-1/p49-cheung/p49-cheung.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-1/p49-cheung/",
  abstract =     "The software architecture of a distributed program can
                 be represented by a hierarchical composition of
                 subsystems, with interacting processes at the leaves of
                 the hierarchy. Compositional reachability analysis
                 (CRA) is a promising state reduction technique which
                 can be automated and used in stages to derive the
                 overall behavior of a distributed program based on its
                 architecture. CRA is particularly suitable for the
                 analysis of programs that are subject to evolutionary
                 change. When a program evolves, only the behaviors of
                 those subsystems affected by the change need be
                 reevaluated. The technique however has a limitation.
                 The properties available for analysis are constrained
                 by the set of actions that remain globally observable.
                 Properties involving actions encapsulated by subsystems
                 may therefore not be analyzed. In this article, we
                 enhance the CRA technique to check safety properties
                 which may contain actions that are not globally
                 observable. To achieve this, the state machine model is
                 augmented with a special trap state labeled as ?. We
                 propose a scheme to transform, in stages, a property
                 that involves hidden actions to one that involves only
                 globally observable actions. The enhanced technique
                 also includes a mechanism aiming at reducing the
                 debugging effort. The technique is illustrated using a
                 gas station system example.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "compositional reachability analysis; distributed
                 systems; model checking; safety properties; static
                 analysis",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2); Software --- Software Engineering ---
                 Software/Program Verification (D.2.4); Software ---
                 Software Engineering --- Management (D.2.9): {\bf
                 Software quality assurance (SQA)}",
}

@Article{Ciapessoni:1999:FMF,
  author =       "Emanuele Ciapessoni and Piergiorgio Mirandola and
                 Alberto Coen-Porisini and Dino Mandrioli and Angelo
                 Morzenti",
  title =        "From formal models to formally based methods: an
                 industrial experience",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "1",
  pages =        "79--113",
  month =        jan,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-1/p79-ciapessoni/p79-ciapessoni.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-1/p79-ciapessoni/",
  abstract =     "We address the problem of increasing the impact of
                 formal methods in the practice of industrial computer
                 applications. We summarize the reasons why formal
                  methods so far did not gain widespread use within the
                 industrial environment despite several promising
                 experiences. We suggest an evolutionary rather than
                 revolutionary attitude in the introduction of formal
                 methods in the practice of industrial applications, and
                 we report on our long-standing experience which
                  involves an academic institution, Politecnico di
                 Milano, two main industrial partners, ENEL and CISE,
                 and occasionally a few other industries. Our approach
                 aims at augmenting an existing and fairly deeply rooted
                 informal industrial methodology with our original
                 formalism, the logic specification language TRIO. On
                 the basis of the experiences we gained we argue that
                 our incremental attitude toward the introduction of
                 formal methods within the industry could be effective
                  largely independently of the chosen formalism.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Documentation; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal models; industrial applications; object
                 orientation; specification; supervision and control;
                 technology transfer",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Methodologies
                 (e.g., object-oriented, structured)}; Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2); Software --- Software Engineering ---
                 Software/Program Verification (D.2.4)",
}

@Article{McCann:1999:MMI,
  author =       "Peter J. McCann and Gruia-Catalin Roman",
  title =        "Modeling {Mobile IP} in {Mobile UNITY}",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "2",
  pages =        "115--146",
  month =        apr,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-2/p115-mccann/p115-mccann.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-2/p115-mccann/",
  abstract =     "With recent advances in wireless communication
                 technology, mobile computing is an increasingly
                 important area of research. A mobile system is one
                 where independently executing components may migrate
                 through some space during the course of the
                 computation, and where the pattern of connectivity
                 among the components changes as they move in and out of
                 proximity. Mobile UNITY is a notation and proof logic
                 for specifying and reasoning about mobile systems. In
                 this article it is argued that Mobile UNITY contributes
                 to the modular development of system specifications
                 because of the declarative fashion in which
                 coordination among components is specified. The
                 packet-forwarding mechanism at the core of the Mobile
                 IP protocol for routing to mobile hosts is taken as an
                 example. A Mobile UNITY model of packet forwarding and
                 the mobile system in which it must operate is
                 developed. Proofs of correctness properties, including
                 important real-time properties, are outlined, and the
                 role of formal verification in the development of
                 protocols such as Mobile IP is discussed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages; Reliability; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "formal methods; mobile computing; mobile UNITY; shared
                 variables; synchronization; transient interactions;
                 weak consistency",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2): {\bf IP}; Computer Systems Organization ---
                 Computer-Communication Networks --- Network
                 Architecture and Design (C.2.1): {\bf Wireless
                 communication}; Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2): {\bf Protocol verification}",
}

@Article{Cook:1999:SPV,
  author =       "Jonathan E. Cook and Alexander L. Wolf",
  title =        "Software process validation: quantitatively measuring
                 the correspondence of a process to a model",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "2",
  pages =        "147--176",
  month =        apr,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-2/p147-cook/p147-cook.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-2/p147-cook/",
  abstract =     "To a great extent, the usefulness of a formal model of
                 a software process lies in its ability to accurately
                 predict the behavior of the executing process.
                 Similarly, the usefulness of an executing process lies
                 largely in its ability to fulfill the requirements
                 embodied in a formal model of the process. When process
                 models and process executions diverge, something
                 significant is happening. We have developed techniques
                 for uncovering and measuring the discrepancies between
                 models and executions, which we call {\em process
                 validation}. Process validation takes a process
                 execution and a process model, and measures the level
                 of correspondence between the two. Our metrics are
                 tailorable and give process engineers control over
                 determining the severity of different types of
                 discrepancies. The techniques provide detailed
                 information once a high-level measurement indicates the
                  presence of a problem. We have applied our process
                 validation methods in an industrial case study, of
                 which a portion is described in this article.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Management; Measurement",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "balboa; process validation; software process; tools",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Computing Milieux --- Management
                 of Computing and Information Systems --- Software
                 Management (K.6.3): {\bf Software development};
                 Computing Milieux --- Management of Computing and
                 Information Systems --- Software Management (K.6.3):
                 {\bf Software maintenance}; Software --- Software
                 Engineering --- Metrics (D.2.8): {\bf Process
                 metrics}",
}

@Article{Devanbu:1999:GCF,
  author =       "Premkumar T. Devanbu",
  title =        "{GENOA} --- a customizable, front-end-retargetable
                 source code analysis framework",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "2",
  pages =        "177--212",
  month =        apr,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-2/p177-devanbu/p177-devanbu.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-2/p177-devanbu/",
  abstract =     "{\em Code analysis\/} tools provide support for such
                 software engineering tasks as program understanding,
                 software metrics, testing, and reengineering. In this
                 article we describe GENOA, the framework underlying
                 application generators such as Aria and GEN++ which
                 have been used to generate a wide range of practical
                 code analysis tools. This experience illustrates {\em
                 front-end retargetability\/} of GENOA; we describe the
                 features of the GENOA framework that allow it to be
                 used with different front ends. While permitting
                 arbitrary parse tree computations, the GENOA
                 specification language has special, compact iteration
                 operators that are tuned for expressing simple,
                 polynomial-time analysis programs; in fact, there is a
                 useful sublanguage of the GENOA language that can
                 express precisely all (and only) {\em
                 polynomial-time\/} (PTIME) analysis programs on parse
                 trees. Thus, we argue that the GENOA language is a
                 simple and convenient vehicle for implementing a range
                  of analysis tools. We also argue that the ``front-end
                 reuse'' approach of GENOA offers an important advantage
                 for tools aimed at large software projects: the reuse
                 of complex, expensive build procedures to run generated
                 tools over large source bases. In this article, we
                 describe the GENOA framework and our experiences with
                 it.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "code inspection; metrics; reverse engineering; source
                 analysis",
  subject =      "Software --- Programming Languages --- Processors
                 (D.3.4); Software --- Software Engineering --- Coding
                 Tools and Techniques (D.2.3); Software --- Software
                 Engineering --- Programming Environments (D.2.6);
                 Software --- Software Engineering --- Software
                 Architectures (D.2.11); Software --- Software
                 Engineering --- Testing and Debugging (D.2.5); Software
                 --- Software Engineering --- Metrics (D.2.8)",
}

@Article{Damiani:1999:HAA,
  author =       "E. Damiani and M. G. Fugini and C. Bellettini",
  title =        "A hierarchy-aware approach to faceted classification
                  of object-oriented components",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "3",
  pages =        "215--262",
  month =        jul,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-3/p215-damiani/p215-damiani.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-3/p215-damiani/",
  abstract =     "This article presents a hierarchy-aware classification
                  schema for object-oriented code, where software
                 components are classified according to their {\em
                 behavioral characteristics}, such as provided services,
                 employed algorithms, and needed data. In the case of
                 reusable application frameworks, these characteristics
                 are constructed from their {\em model}, i.e., from the
                 description of the abstract classes specifying both the
                 framework structure and purpose. In conventional object
                 libraries, the characteristics are extracted
                 semiautomatically from class interfaces.
                 Characteristics are term pairs, weighted to represent
                 ``how well'' they describe component behavior. The set
                 of characteristics associated with a given component
                 forms its {\em software descriptor}. A descriptor base
                 is presented where descriptors are organized on the
                 basis of structured relationships, such as similarity
                 and composition. The classification is supported by a
                 thesaurus acting as a language-independent unified
                 lexicon. The descriptor base is conceived for
                 developers who, besides conventionally browsing the
                 descriptors hierarchy, can query the system, specifying
                 a set of desired functionalities and getting a ranked
                 set of adaptable candidates. User feedback is taken
                 into account in order to progressively ameliorate the
                 quality of the descriptors according to the views of
                  the user community. Feedback is made dependent on the
                 user typology through a {\em user profile}.
                 Experimental results in terms of recall and precision
                 of the retrieval mechanism against a sample code base
                 are reported.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Documentation",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "code analysis; component repositories; component
                 retrieval; software reuse; user feedback",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval
                 (H.3.3)",
}

@Article{Podgurski:1999:ESR,
  author =       "Andy Podgurski and Wassim Masri and Yolanda McCleese
                 and Francis G. Wolff and Charles Yang",
  title =        "Estimation of software reliability by stratified
                 sampling",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "3",
  pages =        "263--283",
  month =        jul,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-3/p263-podgurski/p263-podgurski.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-3/p263-podgurski/",
  abstract =     "A new approach to software reliability estimation is
                 presented that combines operational testing with
                 stratified sampling in order to reduce the number of
                 program executions that must be checked manually for
                 conformance to requirements. Automatic cluster analysis
                 is applied to execution profiles in order to stratify
                 captured operational executions. Experimental results
                 are reported that suggest this approach can
                 significantly reduce the cost of estimating
                 reliability.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "beta testing; cluster analysis; operational testing;
                 software reliability; software testing; statistical
                 testing; stratified sampling",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Software libraries}; Software
                 --- Software Engineering --- Testing and Debugging
                 (D.2.5); Software --- Operating Systems --- Reliability
                 (D.4.5); Software --- Software Engineering --- Design
                 Tools and Techniques (D.2.2)",
}

@Article{Jezequel:1999:RVC,
  author =       "Jean-Marc J{\'e}z{\'e}quel",
  title =        "Reifying variants in configuration management",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "3",
  pages =        "284--295",
  month =        jul,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-3/p284-jezequel/p284-jezequel.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-3/p284-jezequel/",
  abstract =     "Using solid software configuration management (SCM)
                 is mandatory to establish and maintain the integrity of
                 the products of a software project throughout the
                 project's software life cycle. Even with the help of
                 sophisticated tools, handling the various dimensions of
                 SCM can be a daunting (and costly) task for many
                 projects. The contribution of this article is to (1)
                  propose a method (based on the use of creational design
                 patterns) to simplify SCM by reifying the {\em
                 variants\/} of an object-oriented software system into
                 language-level objects and (2) show that newly
                 available compilation technology makes this proposal
                 attractive with respect to performance (memory
                 footprint and execution time) by inferring which
                 classes are needed for a specific configuration and
                 optimizing the generated code accordingly.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Management; Performance",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "compilation technology; Eiffel; Mecure;
                 object-oriented analysis and design; reifying variants;
                 SMDS; software configuration management",
  subject =      "Software --- Software Engineering (D.2); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2): {\bf Software libraries}; Software ---
                 Software Engineering --- Management (D.2.9): {\bf
                 Software configuration management}",
}

@Article{Reiss:1999:DE,
  author =       "Steven P. Reiss",
  title =        "The {Desert} environment",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "4",
  pages =        "297--342",
  month =        oct,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-4/p297-reiss/p297-reiss.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-4/p297-reiss/",
  abstract =     "The Desert software engineering environment is a suite
                 of tools developed to enhance programmer productivity
                 through increased tool integration. It introduces an
                 inexpensive form of data integration to provide
                 additional tool capabilities and information sharing
                 among tools, uses a common editor to give high-quality
                 semantic feedback and to integrate different types of
                 software artifacts, and builds virtual files on demand
                 to address specific tasks. All this is done in an open
                 and extensible environment capable of handling large
                 software systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "integrated programming environments; program editors",
  subject =      "Software --- Software Engineering --- Coding Tools and
                 Techniques (D.2.3); Software --- Software Engineering
                 --- Programming Environments (D.2.6)",
}

@Article{Pohl:1999:PTP,
  author =       "Klaus Pohl and Klaus Weidenhaupt and Ralf D{\"o}mges
                 and Peter Haumer and Matthias Jarke and Ralf Klamma",
  title =        "{PRIME} --- toward process-integrated modeling
                 environments: 1",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "4",
  pages =        "343--410",
  month =        oct,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-4/p343-pohl/p343-pohl.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-4/p343-pohl/",
  abstract =     "Research in process-centered environments (PCEs) has
                 focused on project management support and has neglected
                 method guidance for the engineers performing the
                 (software) engineering process. It has been dominated
                 by the search for suitable process-modeling languages
                 and enactment mechanisms. The consequences of process
                 orientation on the computer-based engineering
                 environments, i.e., the interactive tools used during
                 process performance, have been studied much less. In
                 this article, we present the PRIME (Process Integrated
                 Modeling Environments) framework which empowers method
                 guidance through process-integrated tools. In contrast
                 to the tools of PCEs, the process-integrated tools of
                 PRIME adjust their behavior according to the current
                 process situation and the method definitions. Process
                 integration of PRIME tools is achieved through (1) the
                 definition of tool models; (2) the integration of the
                 tool models and the method definitions; (3) the
                 interpretation of the integrated environment model by
                 the tools, the process-aware control integration
                 mechanism, and the enactment mechanism; and (4) the
                 synchronization of the tools and the enactment
                 mechanism based on a comprehensive interaction
                 protocol. We sketch the implementation of PRIME as a
                 reusable implementation framework which facilitates the
                 realization of process-integrated tools as well as the
                 process integration of external tools. We define a
                 six-step procedure for building a PRIME-based
                 process-integrated environment (PIE) and illustrate how
                  PRIME facilitates change integration at an
                  easy-to-adapt modeling level.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Human Factors; Management; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "method guidance; PRIME; process modeling;
                 process-centered environments; process-integrated
                 environments; process-sensitive tools; tool
                 integration; tool modeling",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Tools};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Computer-aided software
                 engineering (CASE)}; Software --- Software Engineering
                 --- Programming Environments (D.2.6): {\bf Interactive
                 environments}; Software --- Programming Languages ---
                 Language Constructs and Features (D.3.3): {\bf
                 Frameworks}; Information Systems --- Information
                 Systems Applications --- Office Automation (H.4.1):
                 {\bf Workflow management}; Computer Applications ---
                 Computer-Aided Engineering (J.6); Computing Milieux ---
                 Management of Computing and Information Systems ---
                 Software Management (K.6.3): {\bf Software process};
                 Software --- Software Engineering --- Programming
                 Environments (D.2.6): {\bf Integrated environments}",
}

@Article{Kuhn:1999:FCE,
  author =       "D. Richard Kuhn",
  title =        "Fault classes and error detection capability of
                 specification-based testing",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "4",
  pages =        "411--424",
  month =        oct,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-4/p411-kuhn/p411-kuhn.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-4/p411-kuhn/",
  abstract =     "Some varieties of specification-based testing rely
                 upon methods for generating test cases from predicates
                 in a software specification. These methods derive
                 various test conditions from logic expressions, with
                 the aim of detecting different types of faults. Some
                 authors have presented empirical results on the ability
                 of specification-based test generation methods to
                 detect failures. This article describes a method for
                 computing the conditions that must be covered by a test
                 set for the test set to guarantee detection of the
                 particular fault class. It is shown that there is a
                 coverage hierarchy to fault classes that is consistent
                 with, and may therefore explain, experimental results
                 on fault-based testing. The method is also shown to be
                 effective for computing MCDC-adequate tests.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Experimentation; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "testing",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4); Software --- Software Engineering
                 --- Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Testing and Debugging
                 (D.2.5)",
}

@Article{Damiani:1999:CHA,
  author =       "E. Damiani and M. G. Fugini and C. Bellettini",
  title =        "Corrigenda: a hierarchy-aware approach to faceted
                 classification of object-oriented components",
  journal =      j-TOSEM,
  volume =       "8",
  number =       "4",
  pages =        "425--472",
  month =        oct,
  year =         "1999",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/1999-8-4/p425-damiani/p425-damiani.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/1999-8-4/p425-damiani/",
  abstract =     "This article presents a hierarchy-aware classification
                 schema for object-oriented code, where software
                 components are classified according to their {\em
                 behavioral characteristics}, such as provided services,
                 employed algorithms, and needed data. In the case of
                 reusable application frameworks, these characteristics
                 are constructed from their {\em model}, i.e., from the
                 description of the abstract classes specifying both the
                 framework structure and purpose. In conventional object
                 libraries, the characteristics are extracted
                 semiautomatically from class interfaces.
                 Characteristics are term pairs, weighted to represent
                 ``how well'' they describe component behavior. The set
                 of characteristics associated with a given component
                 forms its {\em software descriptor}. A descriptor base
                 is presented where descriptors are organized on the
                 basis of structured relationships, such as similarity
                 and composition. The classification is supported by a
                 thesaurus acting as a language-independent unified
                 lexicon. The descriptor base is conceived for
                 developers who, besides conventionally browsing the
                 descriptors hierarchy, can query the system, specifying
                 a set of desired functionalities and getting a ranked
                 set of adaptable candidates. User feedback is taken
                 into account in order to progressively ameliorate the
                 quality of the descriptors according to the views of
                  the user community. Feedback is made dependent on the
                 user typology through a {\em user profile}.
                 Experimental results in terms of recall and precision
                 of the retrieval mechanism against a sample code base
                 are reported.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Information filtering}",
}

@Article{Bultan:2000:CMC,
  author =       "Tevfik Bultan and Richard Gerber and Christopher
                 League",
  title =        "Composite model-checking: verification with
                 type-specific symbolic representations",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "1",
  pages =        "3--50",
  month =        jan,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-1/p3-bultan/p3-bultan.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-1/p3-bultan/",
  abstract =     "There has been a surge of progress in automated
                 verification methods based on state exploration. In
                 areas like hardware design, these technologies are
                 rapidly augmenting key phases of testing and
                 validation. To date, one of the most successful of
                 these methods has been symbolic model-checking, in
                 which large finite-state machines are encoded into
                 compact data structures such as Binary Decision
                 Diagrams (BDDs), and are then checked for safety and
                 liveness properties. However, these techniques have not
                 realized the same success on software systems. One
                 limitation is their inability to deal with
                 infinite-state programs, even those with a single
                 unbounded integer. A second problem is that of finding
                 efficient representations for various variable types.
                 We recently proposed a model-checker for integer-based
                 systems that uses arithmetic constraints as the
                 underlying state representation. While this approach
                 easily verified some subtle, infinite-state concurrency
                 problems, it proved inefficient in its treatment of
                 boolean and (unordered) enumerated types--which are not
                 efficiently representable using arithmetic constraints.
                 In this article we present a new technique that
                 combines the strengths of both BDD and arithmetic
                 constraint representations. Our composite model merges
                 multiple type-specific symbolic representations in a
                 single model-checker. A system's transitions and
                 fixpoint computations are encoded using both BDD (for
                 boolean and enumerated types) and arithmetic
                 constraints (for integers) representations, where the
                 choice depends on the variable types. Our composite
                 model-checking strategy can be extended to other
                 symbolic representations provided that they support
                 operations such as intersection, union, complement,
                 equivalence checking, and relational image computation.
                 We also present conservative approximation techniques
                 for composite representations to address the
                 undecidability of model-checking on infinite-state
                 systems. We demonstrate the effectiveness of our
                 approach by analyzing two example software
                 specifications which include a mixture of booleans,
                 integers, and enumerated types. One of them is a
                 requirements specification for the control software of
                 a nuclear reactor's cooling system, and the other one
                 is a protocol specification.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "binary decision diagrams; Presburger arithmetic;
                 symbolic model-checking",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Formal methods}; Software
                 --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Model checking}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1); Theory of Computation --- Logics and Meanings
                 of Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Invariants}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Mechanical verification}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Pre- and post-conditions}; Software ---
                 Software Engineering --- Requirements/Specifications
                 (D.2.1); Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Tools}",
}

@Article{Corbett:2000:USA,
  author =       "James C. Corbett",
  title =        "Using shape analysis to reduce finite-state models of
                 concurrent {Java} programs",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "1",
  pages =        "51--93",
  month =        jan,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-1/p51-corbett/p51-corbett.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-1/p51-corbett/",
  abstract =     "Finite-state verification (e.g., model checking)
                 provides a powerful means to detect concurrency errors,
                 which are often subtle and difficult to reproduce.
                 Nevertheless, widespread use of this technology by
                 developers is unlikely until tools provide automated
                 support for extracting the required finite-state models
                 directly from program source. Unfortunately, the
                 dynamic features of modern languages such as Java
                 complicate the construction of compact finite-state
                 models for verification. In this article, we show how
                 shape analysis, which has traditionally been used for
                 computing alias information in optimizers, can be used
                 to greatly reduce the size of finite-state models of
                 concurrent Java programs by determining which
                 heap-allocated variables are accessible only by a
                 single thread, and which shared variables are protected
                 by locks. We also provide several other state-space
                 reductions based on the semantics of Java monitors. A
                 prototype of the reductions demonstrates their
                 effectiveness.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "concurrent systems; finite-state verification; Java;
                 model extraction; modeling; shape analysis; state-space
                 reductions",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4)",
}

@Article{Gunter:2000:ADB,
  author =       "Carl A. Gunter",
  title =        "Abstracting dependencies between software
                 configuration items",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "1",
  pages =        "94--131",
  month =        jan,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-1/p94-gunter/p94-gunter.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-1/p94-gunter/",
  abstract =     "This article studies an abstract model of dependencies
                 between software configuration items based on a theory
                 of concurrent computation over a class of Petri nets
                 called {\em production\/} nets. A general theory of
                 build optimizations and their correctness is developed
                 based on a form of abstract interpretation called a
                 {\em build abstraction\/}; these are created during a
                 build and are used to optimize subsequent builds.
                 Various examples of such optimizations are discussed.
                 The theory is used to show how properties can be
                 characterized and proved, and how optimizations can be
                 composed and compared.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "abstract interpretation; mathematical models of build
                 dependencies; Petri nets; software configuration
                 management",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4); Software --- Software Engineering
                 --- Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Restructuring, reverse engineering, and
                 reengineering}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7);
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Semantics of Programming Languages
                 (F.3.2)",
}

@Article{Sistla:2000:SSB,
  author =       "A. Prasad Sistla and Viktor Gyuris and E. Allen
                 Emerson",
  title =        "{SMC}: a symmetry-based model checker for verification
                 of safety and liveness properties",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "2",
  pages =        "133--166",
  month =        apr,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-2/p133-sistla/p133-sistla.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-2/p133-sistla/",
  abstract =     "The article presents the SMC system. SMC can be used
                 for checking safety and liveness properties of
                 concurrent programs under different fairness
                 assumptions. It is based on explicit state enumeration.
                 It combats the state explosion by exploiting symmetries
                 of the input concurrent program, usually present in the
                 form of identical processes, in two different ways.
                 Firstly, it reduces the number of explored states by
                 identifying those states that are equivalent under the
                 symmetries of the system; this is called {\em process
                 symmetry}. Secondly, it reduces the number of edges
                 explored from each state, in the reduced state graph,
                 by exploiting the symmetry of a single state; this is
                 called {\em state symmetry}. SMC works in an {\em
                 on-the-fly\/} manner; it constructs the reduced state
                 graph as and when it is needed. This method facilitates
                 early termination, speeds up model checking, and
                 reduces memory requirements. We employed SMC to check
                 the correctness of, among other standard examples, the
                 Link Layer part of the IEEE Standard 1394 ``Firewire''
                 high-speed serial bus protocol. SMC found deadlocks in
                 the protocol. SMC was also used to check certain liveness
                 properties. A report on the case study is included in
                 the article.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Performance; Standardization;
                 Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "automata; model checking",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Model checking}; Software
                 --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Formal methods}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Specifying and Verifying and Reasoning about Programs
                 (F.3.1): {\bf Mechanical verification}; Software ---
                 Programming Techniques --- Concurrent Programming
                 (D.1.3)",
}

@Article{Ciancarini:2000:UCL,
  author =       "P. Ciancarini and F. Franz{\'e} and C. Mascolo",
  title =        "Using a coordination language to specify and analyze
                 systems containing mobile components",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "2",
  pages =        "167--198",
  month =        apr,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-2/p167-ciancarini/p167-ciancarini.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-2/p167-ciancarini/",
  abstract =     "New computing paradigms for network-aware applications
                 need specification languages able to deal with the
                 features of mobile code-based systems. A coordination
                 language provides a formal framework in which the
                 interaction of active entities can be expressed. A
                 coordination language deals with the creation and
                 destruction of code or complex agents, their
                 communication activities, as well as their distribution
                 and mobility in space. We show how the coordination
                 language PoliS offers a flexible basis for the
                 description and the automatic analysis of architectures
                 of systems including mobile entities. PoliS is based on
                 multiple tuple spaces and offers a basis for defining,
                 studying, and controlling mobility as it allows
                 decoupling mobile entities from their environments both
                 in space and in time. The pattern-matching mechanism
                 adopted for communication helps in abstracting from
                 addressing issues. We have developed a model-checking
                 technique for the automatic analysis of PoliS
                 specifications. In the article we show how this
                 technique can be applied to mobile code-based systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Software ---
                 Software Engineering --- Software/Program Verification
                 (D.2.4): {\bf Model checking}; Software --- Programming
                 Languages --- Formal Definitions and Theory (D.3.1):
                 {\bf Semantics}; Software --- Programming Languages ---
                 Language Classifications (D.3.2): {\bf Concurrent,
                 distributed, and parallel languages}",
}

@Article{Louridas:2000:GMR,
  author =       "Panagiotis Louridas and Pericles Loucopoulos",
  title =        "A generic model for reflective design",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "2",
  pages =        "199--237",
  month =        apr,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-2/p199-louridas/p199-louridas.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-2/p199-louridas/",
  abstract =     "Rapid technological change has had an impact on the
                 nature of software. This has led to new exigencies and
                 to demands for software engineering that pay particular
                 attention to meeting them. We advocate that such
                 demands can be met, at least in large part, through
                 the adoption of software engineering processes that are
                 founded on a reflective stance. To this end, we turn
                 our attention to the field of Design Rationale. We
                 analyze and characterize Design Rationale approaches
                 and show that despite surface differences between
                 different approaches, they all tend to be variants of a
                 relatively small set of static and dynamic affinities.
                 We use the synthesis of static and dynamic affinities
                 to develop a generic model for reflective design. The
                 model is nonprescriptive and affects minimally the
                 design process. It is context-independent and is
                 intended to be used as a facilitator in participative
                 design, supporting group communication and
                 deliberation. The potential utility of the model is
                 demonstrated through two examples, one from the world
                 of business design and the other from programming
                 language design.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "design aids; design rationale; development;
                 participative; reflective",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Elicitation
                 methods (e.g., rapid prototyping, interviews, JAD)};
                 Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2)",
}

@Article{Inverardi:2000:SCS,
  author =       "Paola Inverardi and Alexander L. Wolf and Daniel
                 Yankelevich",
  title =        "Static checking of system behaviors using derived
                 component assumptions",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "3",
  pages =        "239--272",
  month =        jul,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-3/p239-inverardi/p239-inverardi.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-3/p239-inverardi/",
  abstract =     "A critical challenge faced by the developer of a
                 software system is to understand whether the system's
                 components correctly integrate. While type theory has
                 provided substantial help in detecting and preventing
                 errors in mismatched static properties, much work
                 remains in the area of dynamics. In particular,
                 components make assumptions about their behavioral
                 interaction with other components, but currently we
                 have only limited ways in which to state those
                 assumptions and to analyze those assumptions for
                 correctness. We have formulated a method that begins to
                 address this problem. The method operates at the
                 architectural level so that behavioral integration
                 errors, such as deadlock, can be revealed early and at
                 a high level. For each component, a specification is
                 given of its interaction behavior. From this
                 specification, assumptions that the component makes
                 about the corresponding interaction behavior of the
                 external context are automatically derived. We have
                 defined an algorithm that performs compatibility checks
                 between finite representations of a component's context
                 assumptions and the actual interaction behaviors of the
                 components with which it is intended to interact. A
                 configuration of a system is possible if and only if a
                 successful way of matching actual behaviors with
                 assumptions can be found. The state-space complexity of
                 this algorithm is significantly less than that of
                 comparable approaches, and in the worst case, the time
                 complexity is comparable to the worst case of standard
                 reachability analysis.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Theory; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "assumptions; chemical abstract machine model;
                 component-based systems; static analysis",
  subject =      "Software --- Software Engineering (D.2); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2): {\bf Modules and interfaces}; Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2): {\bf State diagrams}; Software --- Software
                 Engineering --- Software/Program Verification (D.2.4):
                 {\bf Assertion checkers}; Software --- Software
                 Engineering --- Software/Program Verification (D.2.4):
                 {\bf Formal methods}; Software --- Software Engineering
                 --- Software Architectures (D.2.11): {\bf Languages
                 (e.g., description, interconnection, definition)};
                 Theory of Computation --- Logics and Meanings of
                 Programs (F.3); Theory of Computation --- Logics and
                 Meanings of Programs --- Specifying and Verifying and
                 Reasoning about Programs (F.3.1): {\bf Assertions};
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Mechanical verification};
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Specification
                 techniques}",
}

@Article{Minsky:2000:LGI,
  author =       "Naftaly H. Minsky and Victoria Ungureanu",
  title =        "Law-governed interaction: a coordination and control
                 mechanism for heterogeneous distributed systems",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "3",
  pages =        "273--305",
  month =        jul,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-3/p273-minsky/p273-minsky.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-3/p273-minsky/",
  abstract =     "Software technology is undergoing a transition from
                 monolithic systems, constructed according to a single
                 overall design, into conglomerates of semiautonomous,
                 heterogeneous, and independently designed subsystems,
                 constructed and managed by different organizations,
                 with little, if any, knowledge of each other. Among the
                 problems inherent in such conglomerates, none is more
                 serious than the difficulty to {\em control\/} the
                 activities of the disparate agents operating in it, and
                 the difficulty for such agents to {\em coordinate\/}
                 their activities with each other. We argue that the
                 nature of coordination and control required for such
                 systems calls for the following principles to be
                 satisfied: (1) coordination policies need to be
                 enforced; (2) the enforcement needs to be
                 decentralized; (3) coordination policies need to be
                 formulated explicitly--rather than being implicit in
                 the code of the agents involved--and they should be
                 enforced by means of a generic, broad spectrum
                 mechanism; and (4) it should be possible to deploy and
                 enforce a policy incrementally, without exacting any
                 cost from agents and activities not subject to it. We
                 describe a mechanism called law-governed interaction
                 (LGI), currently implemented by the Moses toolkit,
                 which has been designed to satisfy these principles. We
                 show that LGI is at least as general as a conventional
                 centralized coordination mechanism (CCM), and that it
                 is more scalable, and generally more efficient, than
                 CCM.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Performance; Security",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "coordination of heterogeneous agents; policy
                 enforcement; scalability",
  subject =      "Software --- Software Engineering --- Software
                 Architectures (D.2.11); Software --- Software
                 Engineering --- Design Tools and Techniques (D.2.2);
                 Computer Systems Organization ---
                 Computer-Communication Networks --- Distributed Systems
                 (C.2.4); Information Systems --- Information Interfaces
                 and Presentation --- Group and Organization Interfaces
                 (H.5.3); Computer Applications --- Computers in Other
                 Systems (J.7): {\bf Command and control}",
}

@Article{Mills:2000:KBM,
  author =       "Kevin L. Mills and Hassan Gomaa",
  title =        "A knowledge-based method for inferring semantic
                 concepts from visual models of system behavior",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "3",
  pages =        "306--337",
  month =        jul,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-3/p306-mills/p306-mills.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-3/p306-mills/",
  abstract =     "Software designers use visual models, such as data
                 flow/control flow diagrams or object collaboration
                 diagrams, to express system behavior in a form that can
                 be understood easily by users and by programmers, and
                 from which designers can generate a software
                 architecture. The research described in this paper is
                 motivated by a desire to provide an automated
                 designer's assistant that can generate software
                 architectures for concurrent systems directly from
                 behavioral models expressed visually as flow diagrams.
                 To achieve this goal, an automated designer's assistant
                 must be capable of interpreting flow diagrams in
                 semantic, rather than syntactic, terms. While semantic
                 concepts can be attached manually to diagrams using
                 labels, such as stereotypes in the Unified Modeling
                 Language (UML), this paper considers the possibility of
                 providing automated assistance to infer appropriate
                 tags for symbols on a flow diagram. The approach relies
                 upon constructing an underlying metamodel that defines
                 semantic concepts based upon (1) syntactic
                 relationships among visual symbols and (2) inheritance
                 relationships among semantic concepts. Given such a
                 metamodel, a rule-based inference engine can, in many
                 situations, infer the presence of semantic concepts on
                 a flow diagram, and can tag symbols accordingly. Further,
                 an object-oriented query system can compare semantic
                 tags on diagram instances for conformance with their
                 definition in the metamodel. To illustrate the
                 approach, the paper describes a metamodel for data
                 flow/control flow diagrams used in the context of a
                 specific software modeling method, Concurrent
                 Object-Based Real-time Analysis (COBRA). The metamodel
                 is implemented using an expert-system shell, CLIPS
                 V6.0, which integrates an object-oriented language with
                 a rule-based inference engine. \par

                 The paper applies the implemented metamodel to design
                 software for an automobile cruise-control system and
                 provides an evaluation of the approach based upon
                 results from four case studies. For the case studies,
                 the implemented metamodel recognized, automatically and
                 correctly, the existence of 86\% of all COBRA semantic
                 concepts within the flow diagrams. Varying degrees of
                 human assistance were used to correctly identify the
                 remaining semantic concepts within the diagrams: in two
                 percent of the cases the implemented metamodel reached
                 tentative classifications that a designer was asked to
                 confirm or override; in four percent of the cases a
                 designer was asked to provide additional information
                 before a concept was classified; in the remaining eight
                 percent of the cases the designer was asked to identify
                 the concept.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Experimentation; Measurement",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "concept classification systems; concurrent systems;
                 knowledge-based software engineering; semantic data
                 modeling; software design methods; visual modeling",
  subject =      "Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2)",
}

@Article{Wallach:2000:SSM,
  author =       "Dan S. Wallach and Andrew W. Appel and Edward W.
                 Felten",
  title =        "{SAFKASI}: a security mechanism for language-based
                 systems",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "4",
  pages =        "341--378",
  month =        oct,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-4/p341-wallach/p341-wallach.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-4/p341-wallach/",
  abstract =     "In order to run untrusted code in the same process as
                 trusted code, there must be a mechanism to allow
                 dangerous calls to determine if their caller is
                 authorized to exercise the privilege of using the
                 dangerous routine. Java systems have adopted a
                 technique called stack inspection to address this
                 concern. But its original definition, in terms of
                 searching stack frames, had an unclear relationship to
                 the actual achievement of security, overconstrained the
                 implementation of a Java system, limited many desirable
                 optimizations such as method inlining and tail
                 recursion, and generally interfered with
                 interprocedural optimization. We present a new
                 semantics for stack inspection based on a belief logic
                 and its implementation using the calculus of {\em
                 security-passing style\/} which addresses the concerns
                 of traditional stack inspection. With security-passing
                 style, we can efficiently represent the security
                 context for any method activation, and we can build a
                 new implementation strictly by rewriting the Java
                 bytecodes before they are loaded by the system. No
                 changes to the JVM or bytecode semantics are necessary.
                 With a combination of static analysis and runtime
                 optimizations, our prototype implementation shows
                 reasonable performance (although traditional stack
                 inspection is still faster), and is easier to consider
                 for languages beyond Java. We call our system SAFKASI
                 (the Security Architecture Formerly Known as Stack
                 Inspection).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages; Security",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "access control; applets; Internet; Java;
                 security-passing style; stack inspection; WWW",
  subject =      "Software --- Programming Techniques ---
                 Object-oriented Programming (D.1.5); Software ---
                 Software Engineering --- General (D.2.0): {\bf
                 Protection mechanisms}; Software --- Programming
                 Languages --- Language Classifications (D.3.2): {\bf
                 Object-oriented languages}; Software --- Operating
                 Systems --- Security and Protection (D.4.6): {\bf
                 Access controls}; Software --- Operating Systems ---
                 Security and Protection (D.4.6): {\bf Authentication}",
}

@Article{Fong:2000:PLM,
  author =       "Philip W. L. Fong and Robert D. Cameron",
  title =        "Proof linking: modular verification of mobile programs
                 in the presence of lazy, dynamic linking",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "4",
  pages =        "379--409",
  month =        oct,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-4/p379-fong/p379-fong.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-4/p379-fong/",
  abstract =     "Although mobile code systems typically employ
                 link-time code verifiers to protect host computers from
                 potentially malicious code, implementation flaws in the
                 verifiers may still leave the host system vulnerable to
                 attack. Compounding the inherent complexity of the
                 verification algorithms themselves, the need to support
                 lazy, dynamic linking in mobile code systems typically
                 leads to architectures that exhibit strong
                 interdependencies between the loader, the verifier, and
                 the linker. To simplify verifier construction and
                 provide improved assurances of verifier integrity, we
                 propose a modular architecture based on the concept of
                 proof linking. This architecture encapsulates the
                 verification process and removes dependencies between
                 the loader, the verifier, and the linker. We also
                 formally model the process of proof linking and
                 establish properties to which correct implementations
                 must conform. As an example, we instantiate our
                 architecture for the problem of Java bytecode
                 verification and assess the correctness of this
                 instantiation. Finally, we briefly discuss alternative
                 mobile code verification architectures enabled by the
                 proof-linking concept.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Design; Languages; Security; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "correctness conditions; dynamic linking; Java; mobile
                 code; modularity; proof linking; safety; verification
                 protocol; virtual machine architecture",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Correctness proofs};
                 Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Formal methods}; Software
                 --- Software Engineering ---
                 Requirements/Specifications (D.2.1); Software ---
                 Programming Languages --- Processors (D.3.4): {\bf
                 Run-time environments}",
}

@Article{Myers:2000:PPU,
  author =       "Andrew C. Myers and Barbara Liskov",
  title =        "Protecting privacy using the decentralized label
                 model",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "4",
  pages =        "410--442",
  month =        oct,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-4/p410-myers/p410-myers.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-4/p410-myers/",
  abstract =     "Stronger protection is needed for the confidentiality
                 and integrity of data, because programs containing
                 untrusted code are the rule rather than the exception.
                 Information flow control allows the enforcement of
                 end-to-end security policies, but has been difficult to
                 put into practice. This article describes the
                 decentralized label model, a new label model for
                 control of information flow in systems with mutual
                 distrust and decentralized authority. The model
                 improves on existing multilevel security models by
                 allowing users to declassify information in a
                 decentralized way, and by improving support for
                 fine-grained data sharing. It supports static program
                 analysis of information flow, so that programs can be
                 certified to permit only acceptable information flows,
                 while largely avoiding the overhead of run-time
                 checking. The article introduces the language Jif, an
                 extension to Java that provides static checking of
                 information flow using the decentralized label model.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Security",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "confidentiality; declassification; downgrading;
                 end-to-end; information flow controls; integrity;
                 lattice; policies; principals; roles; type checking",
  subject =      "Software --- Operating Systems --- Security and
                 Protection (D.4.6): {\bf Information flow controls}",
}

@Article{Clarke:2000:VSP,
  author =       "E. M. Clarke and S. Jha and W. Marrero",
  title =        "Verifying security protocols with {Brutus}",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "4",
  pages =        "443--487",
  month =        oct,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-4/p443-clarke/p443-clarke.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-4/p443-clarke/",
  abstract =     "Due to the rapid growth of the ``Internet'' and the
                 ``World Wide Web'', security has become a very important
                 concern in the design and implementation of software
                 systems. Since security has become an important issue,
                 the number of protocols in this domain has become very
                 large. These protocols are very diverse in nature. If a
                 software architect wants to deploy some of these
                 protocols in a system, they have to be sure that the
                 protocol has the right properties as dictated by the
                 requirements of the system. In this article we present
                 BRUTUS, a tool for verifying properties of security
                 protocols. This tool can be viewed as a special-purpose
                 model checker for security protocols. We also present
                 reduction techniques that make the tool efficient.
                 Experimental results are provided to demonstrate the
                 efficiency of BRUTUS.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Security; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "authentication and secure payment protocols; formal
                 methods; model-checking",
  subject =      "Software --- Software Engineering --- Software/Program
                 Verification (D.2.4): {\bf Model checking}; Software
                 --- Operating Systems --- Security and Protection
                 (D.4.6): {\bf Verification**}",
}

@Article{Durante:2000:CAC,
  author =       "Antonio Durante and Riccardo Focardi and Roberto
                 Gorrieri",
  title =        "A compiler for analyzing cryptographic protocols using
                 noninterference",
  journal =      j-TOSEM,
  volume =       "9",
  number =       "4",
  pages =        "488--528",
  month =        oct,
  year =         "2000",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2000-9-4/p488-durante/p488-durante.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2000-9-4/p488-durante/",
  abstract =     "The Security Process Algebra (SPA) is a CCS-like
                 specification language where actions belong to two
                 different levels of confidentiality. It has been used
                 to define several noninterference-like security
                 properties whose verification has been automated by the
                 tool CoSeC. In recent years, a method for analyzing
                 security protocols using SPA and CoSeC has been
                 developed. Even if it has been useful in analyzing
                 small security protocols, this method has proved to be
                 error-prone, as it requires the protocol description
                 and its environment to be written by hand. This problem
                 has been solved by defining a protocol specification
                 language more abstract than SPA, called VSP, and a
                 compiler CVS that automatically generates the SPA
                 specification for a given protocol described in VSP.
                 The VSP/CVS technology is very powerful, and its
                 usefulness is shown with some case studies: the Woo-Lam
                 one-way authentication protocol, for which a new attack
                 on authentication is found, and the Wide Mouthed Frog
                 protocol, where different kinds of attack are detected
                 and analyzed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Security; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "automatic verification; cryptographic protocols;
                 noninterference; process algebra; verification tool",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- General (C.2.0):
                 {\bf Security and protection (e.g., firewalls)};
                 Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2): {\bf Protocol verification}; Software ---
                 Software Engineering --- Software/Program Verification
                 (D.2.4): {\bf Formal methods}; Software --- Software
                 Engineering --- Software/Program Verification (D.2.4):
                 {\bf Model checking}; Software --- Software Engineering
                 --- Software/Program Verification (D.2.4): {\bf
                 Validation}; Theory of Computation --- Logics and
                 Meanings of Programs --- Semantics of Programming
                 Languages (F.3.2): {\bf Operational semantics}; Theory
                 of Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2): {\bf
                 Process models}",
}

@Article{Tip:2001:SBA,
  author =       "F. Tip and T. B. Dinesh",
  title =        "A slicing-based approach for locating type errors",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "1",
  pages =        "5--55",
  month =        jan,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2001-10-1/p5-tip/p5-tip.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2001-10-1/p5-tip/",
  abstract =     "The effectiveness of a type-checking tool strongly
                 depends on the accuracy of the positional information
                 that is associated with type errors. We present an
                 approach where the location associated with an error
                 message {\em e\/} is defined as a {\em slice\/} {\em P
                 e\/} of the program {\em P\/} being type-checked. We
                 show that this approach yields highly accurate
                 positional information: {\em P e\/} is a program that
                 contains precisely those program constructs in {\em
                 P\/} that caused error {\em e}. Semantically, we have
                 the interesting property that type-checking {\em P e\/}
                 is guaranteed to produce the same error {\em e}. Our
                 approach is completely language-independent and has
                 been implemented for a significant subset of Pascal. We
                 also report on experiments with object-oriented type
                 systems, and with a subset of ML.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "abstract interpretation; program slicing;
                 semantics-based tool generation; static semantics;
                 type-checking",
  subject =      "Software --- Programming Languages --- Processors
                 (D.3.4): {\bf Translator writing systems and compiler
                 generators}; Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Specification
                 techniques}",
}

@Article{Chen:2001:TMO,
  author =       "Huo Yan Chen and T. H. Tse and T. Y. Chen",
  title =        "{TACCLE}: a methodology for object-oriented software
                 testing at the class and cluster levels",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "1",
  pages =        "56--109",
  month =        jan,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2001-10-1/p56-chen/p56-chen.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2001-10-1/p56-chen/",
  abstract =     "Object-oriented programming consists of several
                 different levels of abstraction, namely, the
                 algorithmic level, class level, cluster level, and
                 system level. The testing of object-oriented software
                 at the algorithmic and system levels is similar to
                 conventional program testing. Testing at the class and
                 cluster levels poses new challenges. Since methods and
                 objects may interact with one another with unforeseen
                 combinations and invocations, they are much more
                 complex to simulate and test than the hierarchy of
                 functional calls in conventional programs. In this
                 paper, we propose a methodology for object-oriented
                 software testing at the class and cluster levels. In
                 class-level testing, it is essential to determine
                 whether objects produced from the execution of
                 implemented systems would preserve the properties
                 defined by the specification, such as behavioral
                 equivalence and nonequivalence. Our class-level testing
                 methodology addresses both of these aspects. For the
                 testing of behavioral equivalence, we propose to select
                 fundamental pairs of equivalent ground terms as test
                 cases using a black-box technique based on algebraic
                 specifications, and then determine by means of a
                 white-box technique whether the objects resulting from
                 executing such test cases are observationally
                 equivalent. To address the testing of behavioral
                 nonequivalence, we have identified and analyzed several
                 nontrivial problems in the current literature. We
                 propose to classify term equivalence into four types,
                 thereby setting up new concepts and deriving important
                 properties. Based on these results, we propose an
                 approach to deal with the problems in the generation of
                 nonequivalent ground terms as test cases. Relatively
                 little research has contributed to cluster-level
                 testing. In this paper, we also discuss black-box
                 testing at the cluster level. We illustrate the
                 feasibility of using Contract, a formal specification
                 language for the behavioral dependencies and
                 interactions among cooperating objects of different
                 classes in a given cluster. We propose an approach to
                 test the interactions among different classes using
                 every individual message-passing rule in the given
                 Contract specification. We also present an approach to
                 examine the interactions among composite
                 message-passing sequences. We have developed four
                 testing tools to support our methodology.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Languages; Reliability",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "algebraic specifications; contract specifications;
                 message passing; object-oriented programming; software
                 testing",
  subject =      "Software --- Software Engineering ---
                 Requirements/Specifications (D.2.1): {\bf Languages};
                 Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}; Software ---
                 Programming Languages --- Language Classifications
                 (D.3.2): {\bf Object-oriented languages}",
}

@Article{Rothermel:2001:MTS,
  author =       "Gregg Rothermel and Margaret Burnett and Lixin Li and
                 Christopher Dupuis and Andrei Sheretov",
  title =        "A methodology for testing spreadsheets",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "1",
  pages =        "110--147",
  month =        jan,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2001-10-1/p110-rothermel/p110-rothermel.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2001-10-1/p110-rothermel/",
  abstract =     "Spreadsheet languages, which include commercial
                 spreadsheets and various research systems, have had a
                 substantial impact on end-user computing. Research
                 shows, however, that spreadsheets often contain faults;
                 thus, we would like to provide at least some of the
                 benefits of formal testing methodologies to the
                 creators of spreadsheets. This article presents a
                 testing methodology that adapts data flow adequacy
                 criteria and coverage monitoring to the task of testing
                 spreadsheets. To accommodate the evaluation model used
                 with spreadsheets, and the interactive process by which
                 they are created, our methodology is incremental. To
                 accommodate the users of spreadsheet languages, we
                 provide an interface to our methodology that does not
                 require an understanding of testing theory. We have
                 implemented our testing methodology in the context of
                 the Forms/3 visual spreadsheet language. We report on
                 the methodology, its time and space costs, and the
                 mapping from the testing strategy to the user
                 interfaces. In an empirical study, we found that test
                 suites created according to our methodology detected,
                 on average, 81\% of the faults in a set of faulty
                 spreadsheets, significantly outperforming randomly
                 generated test suites.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Languages; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "software testing; spreadsheets",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5); Software --- Software Engineering
                 --- Programming Environments (D.2.6); Information
                 Systems --- Information Systems Applications --- Office
                 Automation (H.4.1); Software --- Programming Techniques
                 --- Visual Programming (D.1.7)",
}

@Article{Bible:2001:CSC,
  author =       "John Bible and Gregg Rothermel and David S.
                 Rosenblum",
  title =        "A comparative study of coarse- and fine-grained safe
                 regression test-selection techniques",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "2",
  pages =        "149--183",
  month =        apr,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2001-10-2/p149-bible/p149-bible.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2001-10-2/p149-bible/",
  abstract =     "{\em Regression test-selection techniques\/} reduce
                 the cost of regression testing by selecting a subset of
                 an existing test suite to use in retesting a modified
                 program. Over the past two decades, numerous regression
                 test-selection techniques have been described in the
                 literature. Initial empirical studies of some of these
                 techniques have suggested that they can indeed benefit
                 testers, but so far, few studies have empirically
                 compared different techniques. In this paper, we
                 present the results of a comparative empirical study of
                 two safe regression test-selection techniques. The
                 techniques we studied have been implemented as the
                 tools DejaVu and TestTube; we compared these tools in
                 terms of a cost model incorporating {\em precision\/}
                 (ability to eliminate unnecessary test cases), {\em
                 analysis cost}, and {\em test execution cost}. Our
                 results indicate that, in many instances, despite its
                 relative lack of precision, TestTube can reduce the
                 time required for regression testing as much as the
                 more precise DejaVu. In other instances, particularly
                 where the time required to execute test cases is long,
                 DejaVu's superior precision gives it a clear advantage
                 over TestTube. Such variations in relative performance
                 can complicate a tester's choice of which tool to use.
                 Our experimental results suggest that a hybrid
                 regression test-selection tool that combines features
                 of TestTube and DejaVu may be an answer to these
                 complications; we present an initial case study that
                 demonstrates the potential benefit of such a tool.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Verification",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "regression test selection; regression testing",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5)",
}

@Article{Graves:2001:ESR,
  author =       "Todd L. Graves and Mary Jean Harrold and Jung-Min Kim
                 and Adam Porter and Gregg Rothermel",
  title =        "An empirical study of regression test selection
                 techniques",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "2",
  pages =        "184--208",
  month =        apr,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2001-10-2/p184-graves/p184-graves.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2001-10-2/p184-graves/",
  abstract =     "Regression testing is the process of validating
                 modified software to detect whether new errors have
                 been introduced into previously tested code and to
                 provide confidence that modifications are correct.
                 Since regression testing is an expensive process,
                 researchers have proposed regression test selection
                 techniques as a way to reduce some of this expense.
                 These techniques attempt to reduce costs by selecting
                 and running only a subset of the test cases in a
                 program's existing test suite. Although there have been
                 some analytical and empirical evaluations of individual
                 techniques, to our knowledge only one comparative
                 study, focusing on one aspect of two of these
                 techniques, has been reported in the literature. We
                 conducted an experiment to examine the relative costs
                 and benefits of several regression test selection
                 techniques. The experiment examined five techniques for
                 reusing test cases, focusing on their relative
                 abilities to reduce regression testing effort and
                 uncover faults in modified programs. Our results
                 highlight several differences between the techniques,
                 and expose essential trade-offs that should be
                 considered when choosing a technique for practical
                 application.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "empirical study; regression testing; selective
                 retest",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}; Software --- Software
                 Engineering --- Testing and Debugging (D.2.5): {\bf
                 Debugging aids}",
}

@Article{Sinha:2001:ICD,
  author =       "Saurabh Sinha and Mary Jean Harrold and Gregg
                 Rothermel",
  title =        "Interprocedural control dependence",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "2",
  pages =        "209--254",
  month =        apr,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Apr 20 08:21:35 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tosem/2001-10-2/p209-sinha/p209-sinha.pdf;
                 http://www.acm.org/pubs/citations/journals/tosem/2001-10-2/p209-sinha/",
  abstract =     "Program-dependence information is useful for a variety
                 of applications, such as software testing and
                 maintenance tasks, and code optimization. Properly
                 defined, control and data dependences can be used to
                 identify semantic dependences. To function effectively
                 on whole programs, tools that utilize dependence
                 information require information about interprocedural
                 dependences: dependences that are identified by
                 analyzing the interactions among procedures. Many
                 techniques for computing interprocedural data
                 dependences exist; however, virtually no attention has
                 been paid to interprocedural control dependence.
                 Analysis techniques that fail to account for
                 interprocedural control dependences can suffer
                 unnecessary imprecision and loss of safety. This
                 article presents a definition of interprocedural
                 control dependence that supports the relationship of
                 control and data dependence to semantic dependence. The
                 article presents two approaches for computing
                 interprocedural control dependences, and empirical
                 results pertaining to the use of those approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  generalterms = "Algorithms; Languages; Theory",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "interprocedural analysis; interprocedural control
                 dependence; program slicing; semantic dependence;
                 software maintenance",
  subject =      "Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Debugging aids}; Software ---
                 Software Engineering --- Distribution, Maintenance, and
                 Enhancement (D.2.7): {\bf Restructuring, reverse
                 engineering, and reengineering}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Control structures}; Software
                 --- Programming Languages --- Processors (D.3.4): {\bf
                 Compilers}; Software --- Programming Languages ---
                 Processors (D.3.4): {\bf Optimization}; Computing
                 Methodologies --- Symbolic and Algebraic Manipulation
                 --- Algorithms (I.1.2): {\bf Analysis of algorithms};
                 Software --- Software Engineering --- Testing and
                 Debugging (D.2.5): {\bf Testing tools (e.g., data
                 generators, coverage testing)}",
}

@Article{Gargantini:2001:ADR,
  author =       "Angelo Gargantini and Angelo Morzenti",
  title =        "Automated deductive requirements analysis of critical
                 systems",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "3",
  pages =        "255--307",
  month =        jul,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Perry:2001:PCL,
  author =       "Dewayne E. Perry and Harvey P. Siy and Lawrence G.
                 Votta",
  title =        "Parallel changes in large-scale software development:
                 an observational case study",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "3",
  pages =        "308--337",
  month =        jul,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Picco:2001:RAC,
  author =       "Gian Pietro Picco and Gruia-Catalin Roman and Peter J.
                 McCann",
  title =        "Reasoning about code mobility with mobile {UNITY}",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "3",
  pages =        "338--395",
  month =        jul,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Beauvais:2001:MSA,
  author =       "J.-R. Beauvais and E. Rutten and T. Gautier and R.
                 Houdebine and P. Le Guernic and Y.-M. Tang",
  title =        "Modeling statecharts and activitycharts as signal
                 equations",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "4",
  pages =        "397--451",
  month =        oct,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Bonifati:2001:DDM,
  author =       "Angela Bonifati and Fabiano Cattaneo and Stefano Ceri
                 and Alfonso Fuggetta and Stefano Paraboschi",
  title =        "Designing data marts for data warehouses",
  journal =      j-TOSEM,
  volume =       "10",
  number =       "4",
  pages =        "452--483",
  month =        oct,
  year =         "2001",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Medvidovic:2002:MSA,
  author =       "Nenad Medvidovic and David S. Rosenblum and David F.
                 Redmiles and Jason E. Robbins",
  title =        "Modeling software architectures in the {Unified
                 Modeling Language}",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "1",
  pages =        "2--57",
  month =        jan,
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Tsuchiya:2002:FCE,
  author =       "Tatsuhiro Tsuchiya and Tohru Kikuno",
  title =        "On fault classes and error detection capability of
                 specification-based testing",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "1",
  pages =        "58--62",
  month =        jan,
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Keidar:2002:IBT,
  author =       "Idit Keidar and Roger Khazan and Nancy Lynch and Alex
                 Shvartsman",
  title =        "An inheritance-based technique for building simulation
                 proofs incrementally",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "1",
  pages =        "63--91",
  month =        jan,
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Schrefl:2002:BCS,
  author =       "Michael Schrefl and Markus Stumptner",
  title =        "Behavior-consistent specialization of object life
                 cycles",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "1",
  pages =        "92--148",
  month =        jan,
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Feb 19 14:55:16 MST 2002",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Felder:2002:FDN,
  author =       "Miguel Felder and Mauro Pezz{\`e}",
  title =        "A formal design notation for real-time systems",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "2",
  pages =        "149--190",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:17 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Batory:2002:AET,
  author =       "Don Batory and Clay Johnson and Bob Macdonald and Dale
                 Von Heeder",
  title =        "Achieving extensibility through product-lines and
                 domain-specific languages: a case study",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "2",
  pages =        "191--214",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:17 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Smaragdakis:2002:MLO,
  author =       "Yannis Smaragdakis and Don Batory",
  title =        "Mixin layers: an object-oriented implementation
                 technique for refinements and collaboration-based
                 designs",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "2",
  pages =        "215--255",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:17 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Jackson:2002:ALO,
  author =       "Daniel Jackson",
  title =        "{Alloy}: a lightweight object modelling notation",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "2",
  pages =        "256--290",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:17 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Pons:2002:TAC,
  author =       "Alexander P. Pons",
  title =        "Temporal abstract classes and virtual temporal
                 specifications for real-time systems",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "3",
  pages =        "291--308",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:17 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Mockus:2002:TCS,
  author =       "Audris Mockus and Roy T. Fielding and James D.
                 Herbsleb",
  title =        "Two case studies of open source software development:
                 {Apache} and {Mozilla}",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "3",
  pages =        "309--346",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:17 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Liang:2002:EAA,
  author =       "Donglin Liang and Mary Jean Harrold",
  title =        "Equivalence analysis and its application in improving
                 the efficiency of program slicing",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "3",
  pages =        "347--383",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:17 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Anonymous:2002:OOJ,
  author =       "Anonymous",
  title =        "Obituary: {Ole-Johan Dahl, 1931--2002; Edsger Wybe
                 Dijkstra, 1930--2002; Kristen Nygaard, 1926--2002}",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "4",
  pages =        "385--385",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Bernardo:2002:AFS,
  author =       "Marco Bernardo and Paolo Ciancarini and Lorenzo
                 Donatiello",
  title =        "Architecting families of software systems with process
                 algebras",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "4",
  pages =        "386--426",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Hierons:2002:CTS,
  author =       "R. M. Hierons",
  title =        "Comparing test sets and criteria in the presence of
                 test hypotheses and fault domains",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "4",
  pages =        "427--448",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Egyed:2002:AAC,
  author =       "Alexander Egyed",
  title =        "Automated abstraction of class diagrams",
  journal =      j-TOSEM,
  volume =       "11",
  number =       "4",
  pages =        "449--491",
  year =         "2002",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Ghezzi:2003:E,
  author =       "Carlo Ghezzi and Jeffrey N. Magee and Dieter Rombach
                 and Mary Lou Soffa",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "1",
  pages =        "1--2",
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Felty:2003:FSA,
  author =       "Amy P. Felty and Kedar S. Namjoshi",
  title =        "Feature specification and automated conflict
                 detection",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "1",
  pages =        "3--27",
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Nentwich:2003:FCC,
  author =       "Christian Nentwich and Wolfgang Emmerich and Anthony
                 Finkelstein and Ernst Ellmer",
  title =        "Flexible consistency checking",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "1",
  pages =        "28--63",
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Lopes:2003:HOA,
  author =       "Ant{\'o}nia Lopes and Michel Wermelinger and Jos{\'e}
                 Luiz Fiadeiro",
  title =        "Higher-order architectural connectors",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "1",
  pages =        "64--104",
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Staff:2003:R,
  author =       "{ACM Transactions on Software Engineering and
                 Methodology staff}",
  title =        "Reviewers 2002",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "1",
  pages =        "105--105",
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Aug 7 10:57:18 MDT 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Coen-Porisini:2003:FAD,
  author =       "Alberto Coen-Porisini and Matteo Pradella and Matteo
                 Rossi and Dino Mandrioli",
  title =        "A formal approach for designing {CORBA}-based
                 applications",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "2",
  pages =        "107--151",
  month =        apr,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 31 06:06:37 MST 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{VanDenBrand:2003:TRT,
  author =       "Mark G. J. {Van Den Brand} and Paul Klint and Jurgen
                 J. Vinju",
  title =        "Term rewriting with traversal functions",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "2",
  pages =        "152--190",
  month =        apr,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 31 06:06:37 MST 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Robillard:2003:SAS,
  author =       "Martin P. Robillard and Gail C. Murphy",
  title =        "Static analysis to support the evolution of exception
                 structure in object-oriented systems",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "2",
  pages =        "191--221",
  month =        apr,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 31 06:06:37 MST 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Durante:2003:ATE,
  author =       "Luca Durante and Riccardo Sisto and Adriano
                 Valenzano",
  title =        "Automatic testing equivalence verification of spi
                 calculus specifications",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "2",
  pages =        "222--284",
  month =        apr,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 31 06:06:37 MST 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Cohen:2003:AHQ,
  author =       "Yossi Cohen and Yishai A. Feldman",
  title =        "Automatic high-quality reengineering of database
                 programs by abstraction, transformation and
                 reimplementation",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "3",
  pages =        "285--316",
  month =        jul,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Dec 13 18:40:57 MST 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Zambonelli:2003:DMS,
  author =       "Franco Zambonelli and Nicholas R. Jennings and Michael
                 Wooldridge",
  title =        "Developing multiagent systems: {The Gaia}
                 methodology",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "3",
  pages =        "317--370",
  month =        jul,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Dec 13 18:40:57 MST 2003",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Chechik:2003:MVS,
  author =       "Marsha Chechik and Benet Devereux and Steve
                 Easterbrook and Arie Gurfinkel",
  title =        "Multi-valued symbolic model-checking",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "4",
  pages =        "371--408",
  month =        oct,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Miller:2003:FTS,
  author =       "Tim Miller and Paul Strooper",
  title =        "A framework and tool support for the systematic
                 testing of model-based specifications",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "4",
  pages =        "409--439",
  month =        oct,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Ferrari:2003:MCV,
  author =       "Gian-Luigi Ferrari and Stefania Gnesi and Ugo
                 Montanari and Marco Pistore",
  title =        "A model-checking verification environment for mobile
                 processes",
  journal =      j-TOSEM,
  volume =       "12",
  number =       "4",
  pages =        "440--473",
  month =        oct,
  year =         "2003",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Zave:2004:ATT,
  author =       "Pamela Zave",
  title =        "Address translation in telecommunication features",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "1",
  pages =        "1--36",
  month =        jan,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Uchitel:2004:IES,
  author =       "Sebastian Uchitel and Jeff Kramer and Jeff Magee",
  title =        "Incremental elaboration of scenario-based
                 specifications and behavior models using implied
                 scenarios",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "1",
  pages =        "37--85",
  month =        jan,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Venkatasubramanian:2004:FMR,
  author =       "Nalini Venkatasubramanian and Carolyn Talcott and Gul
                 A. Agha",
  title =        "A formal model for reasoning about adaptive
                 {QoS}-enabled middleware",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "1",
  pages =        "86--147",
  month =        jan,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Akgul:2004:AIL,
  author =       "Tankut Akgul and Vincent J. {Mooney III}",
  title =        "Assembly instruction level reverse execution for
                 debugging",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "2",
  pages =        "149--198",
  month =        apr,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Orso:2004:CDD,
  author =       "Alessandro Orso and Saurabh Sinha and Mary Jean
                 Harrold",
  title =        "Classifying data dependences in the presence of
                 pointers for program comprehension, testing, and
                 debugging",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "2",
  pages =        "199--239",
  month =        apr,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Roshandel:2004:MSM,
  author =       "Roshanak Roshandel and Andr{\'e} {Van Der Hoek} and
                 Marija Mikic-Rakic and Nenad Medvidovic",
  title =        "{Mae}---a system model and environment for managing
                 architectural evolution",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "2",
  pages =        "240--276",
  month =        apr,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Rothermel:2004:TSC,
  author =       "Gregg Rothermel and Sebastian Elbaum and Alexey G.
                 Malishevsky and Praveen Kallakuri and Xuemei Qiu",
  title =        "On test suite composition and cost-effective
                 regression testing",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "3",
  pages =        "277--331",
  month =        jul,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Kramer:2004:CCM,
  author =       "Stefan Kramer and Hermann Kaindl",
  title =        "Coupling and cohesion metrics for knowledge-based
                 systems using frames and rules",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "3",
  pages =        "332--358",
  month =        jul,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Nov 4 07:55:52 MST 2004",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Dwyer:2004:FAV,
  author =       "Matthew B. Dwyer and Lori A. Clarke and Jamieson M.
                 Cobleigh and Gleb Naumovich",
  title =        "Flow analysis for verifying properties of concurrent
                 software systems",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "4",
  pages =        "359--430",
  month =        oct,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Apr 14 10:42:21 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Costagliola:2004:FMI,
  author =       "Gennaro Costagliola and Vincenzo Deufemia and Giuseppe
                 Polese",
  title =        "A framework for modeling and implementing visual
                 notations with applications to software engineering",
  journal =      j-TOSEM,
  volume =       "13",
  number =       "4",
  pages =        "431--487",
  month =        oct,
  year =         "2004",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Apr 14 10:42:21 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Milanova:2005:POS,
  author =       "Ana Milanova and Atanas Rountev and Barbara G. Ryder",
  title =        "Parameterized object sensitivity for points-to
                 analysis for {Java}",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "1",
  pages =        "1--41",
  month =        jan,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Apr 14 10:42:22 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Baresi:2005:FID,
  author =       "Luciano Baresi and Mauro Pezz{\`e}",
  title =        "Formal interpreters for diagram notations",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "1",
  pages =        "42--84",
  month =        jan,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Apr 14 10:42:22 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Sommerville:2005:ESI,
  author =       "Ian Sommerville and Jane Ransom",
  title =        "An empirical study of industrial requirements
                 engineering process assessment and improvement",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "1",
  pages =        "85--117",
  month =        jan,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Thu Apr 14 10:42:22 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Ghezzi:2005:E,
  author =       "Carlo Ghezzi",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "2",
  pages =        "119--123",
  month =        apr,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon May 2 11:17:01 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Berstel:2005:SFM,
  author =       "Jean Berstel and Stefano Crespi Reghizzi and Gilles
                 Roussel and Pierluigi San Pietro",
  title =        "A scalable formal method for design and automatic
                 checking of user interfaces",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "2",
  pages =        "124--167",
  month =        apr,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon May 2 11:17:01 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Erwig:2005:SRS,
  author =       "Martin Erwig and Zhe Fu",
  title =        "Software reuse for scientific computing through
                 program generation",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "2",
  pages =        "168--198",
  month =        apr,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon May 2 11:17:01 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Dashofy:2005:CAD,
  author =       "Eric M. Dashofy and Andr{\'e} van der Hoek and Richard
                 N. Taylor",
  title =        "A comprehensive approach for the development of
                 modular software architecture description languages",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "2",
  pages =        "199--245",
  month =        apr,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon May 2 11:17:01 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Staff:2005:AR,
  author =       "{ACM Transactions on Software Engineering and
                 Methodology staff}",
  title =        "Acknowledgement of referees 2004",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "2",
  pages =        "246--246",
  month =        apr,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon May 2 11:17:01 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Lau:2005:EFC,
  author =       "Man F. Lau and Yuen T. Yu",
  title =        "An extended fault class hierarchy for
                 specification-based testing",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "3",
  pages =        "247--276",
  month =        jul,
  year =         "2005",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1072997.1072998",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 23 15:50:12 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Gervasi:2005:RAI,
  author =       "Vincenzo Gervasi and Didar Zowghi",
  title =        "Reasoning about inconsistencies in natural language
                 requirements",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "3",
  pages =        "277--330",
  month =        jul,
  year =         "2005",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1072997.1072999",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 23 15:50:12 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Klint:2005:TED,
  author =       "Paul Klint and Ralf L{\"a}mmel and Chris Verhoef",
  title =        "Toward an engineering discipline for grammarware",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "3",
  pages =        "331--380",
  month =        jul,
  year =         "2005",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1072997.1073000",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 23 15:50:12 MDT 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Osterweil:2005:E,
  author =       "Leon Osterweil and Carlo Ghezzi and Jeff Kramer and
                 Alexander Wolf",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "4",
  pages =        "381--382",
  month =        oct,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Nov 25 05:58:01 MST 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Estublier:2005:ISE,
  author =       "Jacky Estublier and David Leblang and Andr{\'e} van
                 der Hoek and Reidar Conradi and Geoffrey Clemm and
                 Walter Tichy and Darcy Wiborg-Weber",
  title =        "Impact of software engineering research on the
                 practice of software configuration management",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "4",
  pages =        "383--430",
  month =        oct,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Nov 25 05:58:01 MST 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Ryder:2005:ISE,
  author =       "Barbara G. Ryder and Mary Lou Soffa and Margaret
                 Burnett",
  title =        "The impact of software engineering research on modern
                 programming languages",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "4",
  pages =        "431--477",
  month =        oct,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Nov 25 05:58:01 MST 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Frias:2005:RAS,
  author =       "Marcelo F. Frias and Carlos G. L{\'o}pez Pombo and
                 Gabriel A. Baum and Nazareno M. Aguirre and Thomas S.
                 E. Maibaum",
  title =        "Reasoning about static and dynamic properties in
                 {Alloy}: a purely relational approach",
  journal =      j-TOSEM,
  volume =       "14",
  number =       "4",
  pages =        "478--526",
  month =        oct,
  year =         "2005",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Nov 25 05:58:01 MST 2005",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Eshuis:2006:SMC,
  author =       "Rik Eshuis",
  title =        "Symbolic model checking of {UML} activity diagrams",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "1",
  pages =        "1--38",
  month =        jan,
  year =         "2006",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1125808.1125809",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Apr 22 06:14:53 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Basin:2006:MDS,
  author =       "David Basin and J{\"u}rgen Doser and Torsten
                 Lodderstedt",
  title =        "Model driven security: {From UML} models to access
                 control infrastructures",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "1",
  pages =        "39--91",
  month =        jan,
  year =         "2006",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1125808.1125810",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Jul 04 08:41:19 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Snook:2006:UBF,
  author =       "Colin Snook and Michael Butler",
  title =        "{UML-B}: {Formal} modeling and design aided by {UML}",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "1",
  pages =        "92--122",
  month =        jan,
  year =         "2006",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1125808.1125811",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Apr 22 06:14:53 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Counsell:2006:IUT,
  author =       "Steve Counsell and Stephen Swift and Jason Crampton",
  title =        "The interpretation and utility of three cohesion
                 metrics for object-oriented design",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "2",
  pages =        "123--149",
  month =        apr,
  year =         "2006",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1131421.1131422",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri May 12 07:34:50 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Fisher:2006:IAT,
  author =       "Marc {Fisher II} and Gregg Rothermel and Darren Brown
                 and Mingming Cao and Curtis Cook and Margaret Burnett",
  title =        "Integrating automated test generation into the
                 {WYSIWYT} spreadsheet testing methodology",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "2",
  pages =        "150--194",
  month =        apr,
  year =         "2006",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1131421.1131423",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri May 12 07:34:50 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Zhao:2006:STS,
  author =       "Wei Zhao and Lu Zhang and Yin Liu and Jiasu Sun and
                 Fuqing Yang",
  title =        "{SNIAFL}: {Towards} a static noninteractive approach
                 to feature location",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "2",
  pages =        "195--226",
  month =        apr,
  year =         "2006",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1131421.1131424",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri May 12 07:34:50 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Hierons:2006:ACC,
  author =       "R. M. Hierons",
  title =        "Avoiding coincidental correctness in boundary value
                 analysis",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "3",
  pages =        "227--241",
  month =        jul,
  year =         "2006",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 29 05:26:07 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Sinha:2006:HMB,
  author =       "Avik Sinha and Carol Smidts",
  title =        "{HOTTest}: a model-based test design technique for
                 enhanced testing of domain-specific applications",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "3",
  pages =        "242--278",
  month =        jul,
  year =         "2006",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 29 05:26:07 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Murphy:2006:LCM,
  author =       "Amy L. Murphy and Gian Pietro Picco and Gruia-Catalin
                 Roman",
  title =        "{LIME}: a coordination model and middleware supporting
                 mobility of hosts and agents",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "3",
  pages =        "279--328",
  month =        jul,
  year =         "2006",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 29 05:26:07 MDT 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Thiran:2006:WBE,
  author =       "Philippe Thiran and Jean-Luc Hainaut and Geert-Jan
                 Houben and Djamal Benslimane",
  title =        "Wrapper-based evolution of legacy information
                 systems",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "4",
  pages =        "329--359",
  month =        oct,
  year =         "2006",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Nov 15 06:42:33 MST 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Brambilla:2006:PMW,
  author =       "Marco Brambilla and Stefano Ceri and Piero Fraternali
                 and Ioana Manolescu",
  title =        "Process modeling in {Web} applications",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "4",
  pages =        "360--409",
  month =        oct,
  year =         "2006",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Nov 15 06:42:33 MST 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Snelting:2006:EPC,
  author =       "Gregor Snelting and Torsten Robschink and Jens
                 Krinke",
  title =        "Efficient path conditions in dependence graphs for
                 software safety analysis",
  journal =      j-TOSEM,
  volume =       "15",
  number =       "4",
  pages =        "410--457",
  month =        oct,
  year =         "2006",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Nov 15 06:42:33 MST 2006",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Notkin:2007:Ea,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1189748.1189749",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:11:50 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Ghezzi:2007:E,
  author =       "Carlo Ghezzi",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1189748.1189750",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:11:50 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Robillard:2007:RCS,
  author =       "Martin P. Robillard and Gail C. Murphy",
  title =        "Representing concerns in source code",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1189748.1189751",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:11:50 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "A software modification task often addresses several
                 concerns. A concern is anything a stakeholder may want
                 to consider as a conceptual unit, including features,
                 nonfunctional requirements, and design idioms. In many
                 cases, the source code implementing a concern is not
                 encapsulated in a single programming language module,
                 and is instead scattered and tangled throughout a
                 system. Inadequate separation of concerns increases the
                 difficulty of evolving software in a correct and
                 cost-effective manner. To make it easier to modify
                 concerns that are not well modularized, we propose an
                 approach in which the implementation of concerns is
                 documented in artifacts, called concern graphs. Concern
                 graphs are abstract models that describe which parts of
                 the source code are relevant to different concerns. We
                 present a formal model for concern graphs and the tool
                 support we developed to enable software developers to
                 create and use concern graphs during software evolution
                 tasks. We report on five empirical studies, providing
                 evidence that concern graphs support views and
                 operations that facilitate the task of modifying the
                 code implementing scattered concerns, are
                 cost-effective to create and use, and robust enough to
                 be used with different versions of a software system.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "aspect-oriented software development; concern
                 modeling; Java; Separation of concerns; software
                 evolution",
}

@Article{Xie:2007:DCA,
  author =       "Qing Xie and Atif M. Memon",
  title =        "Designing and comparing automated test oracles for
                 {GUI}-based software applications",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1189748.1189752",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:11:50 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Test designers widely believe that the overall
                 effectiveness and cost of software testing depend
                 largely on the type and number of test cases executed
                 on the software. This article shows that the test
                 oracle, a mechanism that determines whether the
                 software executed correctly for a test case, also
                 significantly impacts the fault detection effectiveness
                 and cost of a test case. Graphical user interfaces
                 (GUIs), which have become ubiquitous for interacting
                 with today's software, have created new challenges for
                 test oracle development. Test designers manually
                 ``assert'' the expected values of specific properties
                 of certain GUI widgets in each test case; during test
                 execution, these assertions are used as test oracles to
                 determine whether the GUI executed correctly. Since a
                 test case for a GUI is a sequence of events, a test
                 designer must decide: (1) what to assert; and (2) how
                 frequently to check an assertion, for example, after
                 each event in the test case or after the entire test
                 case has completed execution. Variations of these two
                 factors significantly impact the fault-detection
                 ability and cost of a GUI test case. A technique to
                 declaratively specify different types of automated GUI
                 test oracles is described. Six instances of test
                 oracles are developed and compared in an experiment on
                 four software systems. The results show that test
                 oracles do affect the fault detection ability of test
                 cases in different and interesting ways: (1) Test cases
                 significantly lose their fault detection ability when
                 using ``weak'' test oracles; (2) in many cases,
                 invoking a ``thorough'' oracle at the end of test case
                 execution yields the best cost-benefit ratio; (3)
                 certain test cases detect faults only if the oracle is
                 invoked during a small ``window of opportunity'' during
                 test execution; and (4) using thorough and
                 frequently-executing test oracles can compensate for
                 not having long test cases.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "graphical user interfaces; GUI state; GUI testing;
                 Test oracles; user interfaces; widgets",
}

@Article{Broy:2007:FMS,
  author =       "Manfred Broy and Ingolf H. Kr{\"u}ger and Michael
                 Meisinger",
  title =        "A formal model of services",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1189748.1189753",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:11:50 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Service-oriented software systems rapidly gain
                 importance across application domains: They emphasize
                 functionality (services), rather than structural entities
                 (components), as the basic building block for system
                 composition. More specifically, services coordinate the
                 interplay of components to accomplish specific tasks.
                 In this article, we establish a foundation of service
                 orientation: Based on the Focus theory of distributed
                 systems (see Broy and St{\o}len [2001]), we introduce a
                 theory and formal model of services. In Focus, systems
                 are composed of interacting components. A component is
                 a total behavior. We introduce a formal model of
                 services where, in contrast, a service is a partial
                 behavior. For services and components, we work out
                 foundational specification techniques and outline
                 methodological development steps. We show how services
                 can be structured and how software architectures can be
                 composed of services and components. Although our
                 emphasis is on a theoretical foundation of the notion
                 of services, we demonstrate the utility of the concepts we
                 introduce by means of a running example from the
                 automotive domain.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "assumption/commitment specifications; Components;
                 service engineering; services; software architecture",
}

@Article{Notkin:2007:Eb,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "2",
  pages =        "6:1--6:??",
  month =        apr,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1217295.1237801",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Krishnamurthi:2007:FIA,
  author =       "Shriram Krishnamurthi and Kathi Fisler",
  title =        "Foundations of incremental aspect model-checking",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "2",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1217295.1217296",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Programs are increasingly organized around features,
                 which are encapsulated using aspects and other
                 linguistic mechanisms. Despite their growing popularity
                 amongst developers, there is a dearth of techniques for
                 computer-aided verification of programs that employ
                 these mechanisms. We present the theoretical
                 underpinnings for applying model checking to programs
                 (expressed as state machines) written using these
                 mechanisms. The analysis is incremental, examining only
                 components that change rather than verifying the entire
                 system every time one part of it changes. Our technique
                 assumes that the set of pointcut designators is known
                 statically, but the actual advice can vary. It handles
                 both static and dynamic pointcut designators. We
                 present the algorithm, prove it sound, and address
                 several subtleties that arise, including cascading
                 advice application and problems of circular
                 reasoning.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "aspect-oriented programming; feature-oriented
                 software; Incremental verification; model checking;
                 modular verification",
}

@Article{Binkley:2007:ESS,
  author =       "David Binkley and Nicolas Gold and Mark Harman",
  title =        "An empirical study of static program slice size",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1217295.1217297",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "This article presents results from a study of all
                 slices from 43 programs, ranging up to 136,000 lines of
                 code in size. The study investigates the effect of five
                 aspects that affect slice size. Three slicing
                 algorithms are used to study two algorithmic aspects:
                 calling-context treatment and slice granularity. The
                 remaining three aspects affect the upstream
                 dependencies considered by the slicer. These include
                 collapsing structure fields, removal of dead code, and
                 the influence of points-to analysis.\par

                 The results show that for the most precise slicer, the
                 average slice contains just under one-third of the
                 program. Furthermore, ignoring calling context causes a
                 50\% increase in slice size, and while (coarse-grained)
                 function-level slices are 33\% larger than
                 corresponding statement-level slices, they may be
                 useful predictors of the (finer-grained)
                 statement-level slice size. Finally, upstream analyses
                 have an order of magnitude less influence on slice
                 size.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Program slicing; slice size",
}

@Article{Gamatie:2007:PDE,
  author =       "Abdoulaye Gamati{\'e} and Thierry Gautier and Paul {Le
                 Guernic} and Jean-Pierre Talpin",
  title =        "Polychronous design of embedded real-time
                 applications",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "2",
  pages =        "9:1--9:??",
  month =        apr,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1217295.1217298",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Embedded real-time systems consist of hardware and
                 software that controls the behavior of a device or
                 plant. They are ubiquitous in today's technological
                 landscape and found in domains such as
                 telecommunications, nuclear power, avionics, and
                 medical technology. These systems are difficult to
                 design and build because they must satisfy both
                 functional and timing requirements to work correctly in
                 their intended environment. Furthermore, embedded
                 systems are often critical systems, where failure can
                 lead to loss of life, loss of mission, or serious
                 financial consequences. Because of the difficulty in
                 creating these systems and the consequences of failure,
                 they require rigorous and reliable design approaches.
                 The synchronous approach is one possible answer to this
                 demand. Its mathematical basis provides formal concepts
                 that favor the trusted design of embedded real-time
                 systems. The multiclock or polychronous model stands
                 out from other synchronous specification models by its
                 capability to enable the design of systems where each
                 component holds its own activation clock as well as
                 single-clocked systems in a uniform way. A great
                 advantage is its convenience for component-based design
                 approaches that enable modular development of
                 increasingly complex modern systems. The expressiveness
                 of its underlying semantics allows dealing with several
                 issues of real-time design. This article exposes
                 insights gained during recent years from the design of
                 real-time applications within the polychronous
                 framework. In particular, it shows promising results
                 about the design of applications from the avionics
                 domain.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Avionics; IMA; Signal; Synchronous approach",
}

@Article{Kapoor:2007:TCF,
  author =       "Kalpesh Kapoor and Jonathan P. Bowen",
  title =        "Test conditions for fault classes in {Boolean}
                 specifications",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jul,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1243987.1243988",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:41 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Fault-based testing of software checks the software
                 implementation for a set of faults. Two previous papers
                 on fault-based testing [Kuhn 1999; Tsuchiya and Kikuno
                 2002] represent the required behavior of the software
                 as a Boolean specification represented in Disjunctive
                 Normal Form (DNF) and then show that faults may be
                 organized in a hierarchy. This article extends these
                 results by identifying necessary and sufficient
                 conditions for fault-based testing. Unlike previous
                 solutions, the formal analysis used to derive these
                 conditions imposes no restrictions (such as DNF) on the
                 form of the Boolean specification.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Boolean specification; fault classes; Fault-based
                 testing",
}

@Article{Paige:2007:MBM,
  author =       "Richard F. Paige and Phillip J. Brooke and Jonathan S.
                 Ostroff",
  title =        "Metamodel-based model conformance and multiview
                 consistency checking",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "3",
  pages =        "11:1--11:??",
  month =        jul,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1243987.1243989",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:41 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Model-driven development, using languages such as UML
                 and BON, often makes use of multiple diagrams (e.g.,
                 class and sequence diagrams) when modeling systems.
                 These diagrams, presenting different views of a system
                 of interest, may be inconsistent. A metamodel provides
                 a unifying framework in which to ensure and check
                 consistency, while at the same time providing the means
                 to distinguish between valid and invalid models, that
                 is, conformance. Two formal specifications of the
                 metamodel for an object-oriented modeling language are
                 presented, and it is shown how to use these
                 specifications for model conformance and multiview
                 consistency checking. Comparisons are made in terms of
                 completeness and the level of automation each provides
                 for checking multiview consistency and model
                 conformance. The lessons learned from applying formal
                 techniques to the problems of metamodeling, model
                 conformance, and multiview consistency checking are
                 summarized.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "automated verification; formal methods; Metamodeling;
                 multiview consistency",
}

@Article{Basu:2007:MCJ,
  author =       "Samik Basu and Scott A. Smolka",
  title =        "Model checking the {Java} metalocking algorithm",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jul,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1243987.1243990",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:41 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We report on our efforts to use the XMC model checker
                 to model and verify the Java metalocking algorithm. XMC
                 [Ramakrishna et al. 1997] is a versatile and efficient
                 model checker for systems specified in XL, a highly
                 expressive value-passing language. Metalocking [Agesen
                 et al. 1999] is a highly-optimized technique for
                 ensuring mutually exclusive access by threads to object
                 monitor queues and, therefore, plays an essential role
                 in allowing Java to offer concurrent access to objects.
                 Metalocking can be viewed as a two-tiered scheme. At
                 the upper level, the metalock level, a thread waits
                 until it can enqueue itself on an object's monitor
                 queue in a mutually exclusive manner. At the lower
                 level, the monitor-lock level, enqueued threads race to
                 obtain exclusive access to the object. Our abstract XL
                 specification of the metalocking algorithm is fully
                 parameterized, both on the number of threads M, and the
                 number of objects N. It also captures a sophisticated
                 optimization of the basic metalocking algorithm known
                 as extra-fast locking and unlocking of uncontended
                 objects. Using XMC, we show that for a variety of
                 values of M and N, the algorithm indeed provides mutual
                 exclusion and freedom from deadlock and lockout at the
                 metalock level. We also show that, while the
                 monitor-lock level of the protocol preserves mutual
                 exclusion and deadlock-freedom, it is not lockout-free
                 because the protocol's designers chose to give equal
                 preference to awaiting threads and newly arrived
                 threads.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Java; metalocking; monitor queues; mutual exclusion;
                 synchronized methods; XMC",
}

@Article{DeLucia:2007:RTL,
  author =       "Andrea {De Lucia} and Fausto Fasano and Rocco Oliveto
                 and Genoveffa Tortora",
  title =        "Recovering traceability links in software artifact
                 management systems using information retrieval
                 methods",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "4",
  pages =        "13:1--13:??",
  month =        sep,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1276933.1276934",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:55 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The main drawback of existing software artifact
                 management systems is the lack of automatic or
                 semi-automatic traceability link generation and
                 maintenance. We have improved an artifact management
                 system with a traceability recovery tool based on
                 Latent Semantic Indexing (LSI), an information
                 retrieval technique. We have assessed LSI to identify
                 strengths and limitations of using information
                 retrieval techniques for traceability recovery and
                 identified the need for an incremental approach. The
                 method and the tool have been evaluated during the
                 development of seventeen software projects involving
                 about 150 students. We observed that although tools
                 based on information retrieval provide a useful support
                 for the identification of traceability links during
                 software development, they are still far from supporting
                 a complete semi-automatic recovery of all links. The
                 results of our experience have also shown that such
                 tools can help to identify quality problems in the
                 textual description of traced artifacts.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "impact analysis; latent semantic indexing; Software
                 artifact management; traceability management",
}

@Article{Wassermann:2007:SCD,
  author =       "Gary Wassermann and Carl Gould and Zhendong Su and
                 Premkumar Devanbu",
  title =        "Static checking of dynamically generated queries in
                 database applications",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "4",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1276933.1276935",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:55 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Many data-intensive applications dynamically construct
                 queries in response to client requests and execute
                 them. Java servlets, for example, can create strings
                 that represent SQL queries and then send the queries,
                 using JDBC, to a database server for execution. The
                 servlet programmer enjoys static checking via Java's
                 strong type system. However, the Java type system does
                 little to check for possible errors in the dynamically
                 generated SQL query strings. Thus, a type error in a
                 generated selection query (e.g., comparing a string
                 attribute with an integer) can result in an SQL runtime
                 exception. Currently, such defects must be rooted out
                 through careful testing, or (worse) might be found by
                 customers at runtime. In this article, we present a
                 sound, static program analysis technique to verify that
                 dynamically generated query strings do not contain type
                 errors. We describe our analysis technique and provide
                 soundness results for our static analysis algorithm. We
                 also describe the details of a prototype tool based on
                 the algorithm and present several illustrative defects
                 found in senior software-engineering student-team
                 projects, online tutorial examples, and a real-world
                 purchase order system written by one of the authors.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "context-free language reachability; database queries;
                 JDBC; Static checking",
}

@Article{Baresi:2007:TES,
  author =       "Luciano Baresi and Sandro Morasca",
  title =        "Three empirical studies on estimating the design
                 effort of {Web} applications",
  journal =      j-TOSEM,
  volume =       "16",
  number =       "4",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1276933.1276936",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:12:55 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Our research focuses on the effort needed for
                 designing modern Web applications. The design effort is
                 an important part of the total development effort,
                 since the implementation can be partially automated by
                 tools.\par

                 We carried out three empirical studies with students of
                 advanced university classes enrolled in engineering and
                 communication sciences curricula. The empirical studies
                 are based on the use of W2000, a special-purpose design
                 notation for the design of Web applications, but the
                 hypotheses and results may apply to a wider class of
                 modeling notations (e.g., OOHDM, WebML, or UWE). We
                 started by investigating the relative importance of
                 each design activity. We then assessed the accuracy of
                 a priori design effort predictions and the influence of
                 a few process-related factors on the effort needed for
                 each design activity. We also analyzed the impact of
                 attributes like the size and complexity of W2000 design
                 artifacts on the total effort needed to design the user
                 experience of web applications. In addition, we carried
                 out a finer-grain analysis, by studying which of these
                 attributes impact the effort devoted to the steps of
                 the design phase that are followed when using W2000.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "effort estimation; empirical study; W2000; Web
                 application design",
}

@Article{Notkin:2007:Ec,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "1",
  pages =        "1:1--1:2",
  month =        dec,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1314493.1314494",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:04 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Meyers:2007:ESS,
  author =       "Timothy M. Meyers and David Binkley",
  title =        "An empirical study of slice-based cohesion and
                 coupling metrics",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "1",
  pages =        "2:1--2:27",
  month =        dec,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1314493.1314495",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:04 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Software reengineering is a costly endeavor, due in
                 part to the ambiguity of where to focus reengineering
                 effort. Coupling and cohesion metrics, particularly
                 quantitative cohesion metrics, have the potential to
                 aid in this identification and to measure progress. The
                 most extensive work on such metrics is with slice-based
                 cohesion metrics. While their use of semantic
                 dependence information should make them an excellent
                 choice for cohesion measurement, their widespread use
                 has been impeded in part by a lack of empirical
                 study.\par

                 Recent advances in software tools make, for the first
                 time, a large-scale empirical study of slice-based
                 cohesion and coupling metrics possible. Four results
                 from such a study are presented. First,
                 ``head-to-head'' qualitative and quantitative
                 comparisons of the metrics identify which metrics
                 provide similar views of a program and which provide
                 unique views of a program. This study includes
                 statistical analysis showing that slice-based metrics
                 are not proxies for simple size-based metrics such as
                 lines of code. Second, two longitudinal studies show
                 that slice-based metrics quantify the deterioration of
                 a program as it ages. This serves to validate the
                 metrics: the metrics quantify the degradation that
                 exists during development; turning this around, the
                 metrics can be used to measure the progress of a
                 reengineering effort. Third, baseline values for
                 slice-based metrics are provided. These values act as
                 targets for reengineering efforts with modules having
                 values outside the expected range being the most in
                 need of attention. Finally, slice-based coupling is
                 correlated and compared with slice-based cohesion.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "cohesion; coupling; reengineering; Slicing; software
                 intervention",
}

@Article{Marin:2007:ICC,
  author =       "Marius Marin and Arie {Van Deursen} and Leon Moonen",
  title =        "Identifying {Crosscutting Concerns Using Fan-In
                 Analysis}",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "1",
  pages =        "3:1--3:37",
  month =        dec,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1314493.1314496",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:04 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Aspect mining is a reverse engineering process that
                 aims at finding crosscutting concerns in existing
                 systems. This article proposes an aspect mining
                 approach based on determining methods that are called
                 from many different places, and hence have a high
                 fan-in, which can be seen as a symptom of crosscutting
                  functionality. The approach is semiautomatic and
                 consists of three steps: metric calculation, method
                 filtering, and call site analysis. Carrying out these
                 steps is an interactive process supported by an Eclipse
                 plug-in called FINT. Fan-in analysis has been applied
                 to three open source Java systems, totaling around
                 200,000 lines of code. The most interesting concerns
                 identified are discussed in detail, which includes
                 several concerns not previously discussed in the
                 aspect-oriented literature. The results show that a
                 significant number of crosscutting concerns can be
                 recognized using fan-in analysis, and each of the three
                 steps can be supported by tools.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Aspect-oriented programming; crosscutting concerns;
                 fan-in metric; reverse engineering",
}

@Article{Frias:2007:EAD,
  author =       "Marcelo F. Frias and Carlos G. Lopez Pombo and Juan P.
                 Galeotti and Nazareno M. Aguirre",
  title =        "Efficient {Analysis} of {DynAlloy Specifications}",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "1",
  pages =        "4:1--4:34",
  month =        dec,
  year =         "2007",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1314493.1314497",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:04 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "DynAlloy is an extension of Alloy to support the
                 definition of actions and the specification of
                 assertions regarding execution traces. In this article
                 we show how we can extend the Alloy tool so that
                 DynAlloy specifications can be automatically analyzed
                 in an efficient way. We also demonstrate that
                 DynAlloy's semantics allows for a sound technique that
                 we call program atomization, which improves the
                 analyzability of properties regarding execution traces
                 by considering certain programs as atomic steps in a
                 trace.\par

                 We present the foundations, case studies, and empirical
                 results indicating that the analysis of DynAlloy
                 specifications can be performed efficiently.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Alloy; dynamic logic; software specification; software
                 validation",
}

@Article{Notkin:2008:Ea,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "2",
  pages =        "5:1--5:??",
  month =        apr,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1348250.1348251",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Notkin:2008:ISS,
  author =       "David Notkin and Mauro Pezz{\`e}",
  title =        "Introduction to the special section from the {ACM}
                 international symposium on software testing and
                 analysis {(ISSTA 2006)}",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "2",
  pages =        "6:1--6:??",
  month =        apr,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1348250.1348252",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Cobleigh:2008:BHD,
  author =       "Jamieson M. Cobleigh and George S. Avrunin and Lori A.
                 Clarke",
  title =        "Breaking up is hard to do: an evaluation of automated
                 assume-guarantee reasoning",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "2",
  pages =        "7:1--7:??",
  month =        apr,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1348250.1348253",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Finite-state verification techniques are often
                 hampered by the state-explosion problem. One proposed
                 approach for addressing this problem is
                 assume-guarantee reasoning, where a system under
                 analysis is partitioned into subsystems and these
                 subsystems are analyzed individually. By composing the
                 results of these analyses, it can be determined whether
                 or not the system satisfies a property. Because each
                 subsystem is smaller than the whole system, analyzing
                 each subsystem individually may reduce the overall cost
                 of verification. Often the behavior of a subsystem is
                 dependent on the subsystems with which it interacts,
                 and thus it is usually necessary to provide assumptions
                 about the environment in which a subsystem executes.
                 Because developing assumptions has been a difficult
                 manual task, the evaluation of assume-guarantee
                 reasoning has been limited. Using recent advances for
                 automatically generating assumptions, we undertook a
                 study to determine if assume-guarantee reasoning
                 provides an advantage over monolithic verification. In
                 this study, we considered all two-way decompositions
                 for a set of systems and properties, using two
                 different verifiers, FLAVERS and LTSA. By increasing
                 the number of repeated tasks in these systems, we
                 evaluated the decompositions as they were scaled. We
                 found that in only a few cases can assume-guarantee
                 reasoning verify properties on larger systems than
                 monolithic verification can, and in these cases the
                 systems that can be analyzed are only a few sizes
                 larger. Although these results are discouraging, they
                 provide insight about research directions that should
                 be pursued and highlight the importance of experimental
                 evaluation in this area.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Assume-guarantee reasoning",
}

@Article{Csallner:2008:DCH,
  author =       "Christoph Csallner and Yannis Smaragdakis and Tao
                 Xie",
  title =        "{DSD-Crasher}: a hybrid analysis tool for bug
                 finding",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "2",
  pages =        "8:1--8:??",
  month =        apr,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1348250.1348254",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "DSD-Crasher is a bug finding tool that follows a
                 three-step approach to program analysis:\par

                 D. Capture the program's intended execution behavior
                 with dynamic invariant detection. The derived
                 invariants exclude many unwanted values from the
                 program's input domain.\par

                 S. Statically analyze the program within the restricted
                 input domain to explore many paths.\par

                 D. Automatically generate test cases that focus on
                 reproducing the predictions of the static analysis.
                  Results confirmed in this way are feasible.\par

                 This three-step approach yields benefits compared to
                 past two-step combinations in the literature. In our
                 evaluation with third-party applications, we
                 demonstrate higher precision over tools that lack a
                 dynamic step and higher efficiency over tools that lack
                 a static step.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Automatic testing; bug finding; dynamic analysis;
                 dynamic invariant detection; extended static checking;
                 false positives; static analysis; test case generation;
                 usability",
}

@Article{Fink:2008:ETV,
  author =       "Stephen J. Fink and Eran Yahav and Nurit Dor and G.
                 Ramalingam and Emmanuel Geay",
  title =        "Effective typestate verification in the presence of
                 aliasing",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "2",
  pages =        "9:1--9:??",
  month =        apr,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1348250.1348255",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "This article addresses the challenge of sound
                 typestate verification, with acceptable precision, for
                 real-world Java programs. We present a novel framework
                 for verification of typestate properties, including
                 several new techniques to precisely treat aliases
                 without undue performance costs. In particular, we
                 present a flow-sensitive, context-sensitive, integrated
                 verifier that utilizes a parametric abstract domain
                 combining typestate and aliasing information. To scale
                 to real programs without compromising precision, we
                 present a staged verification system in which faster
                 verifiers run as early stages which reduce the workload
                 for later, more precise, stages.\par

                 We have evaluated our framework on a number of real
                 Java programs, checking correct API usage for various
                 Java standard libraries. The results show that our
                 approach scales to hundreds of thousands of lines of
                 code, and verifies correctness for 93\% of the
                 potential points of failure.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Alias analysis; program verification; typestate",
}

@Article{Siegel:2008:CSE,
  author =       "Stephen F. Siegel and Anastasia Mironova and George S.
                 Avrunin and Lori A. Clarke",
  title =        "Combining symbolic execution with model checking to
                 verify parallel numerical programs",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "2",
  pages =        "10:1--10:??",
  month =        apr,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1348250.1348256",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We present a method to verify the correctness of
                 parallel programs that perform complex numerical
                 computations, including computations involving
                 floating-point arithmetic. This method requires that a
                 sequential version of the program be provided, to serve
                 as the specification for the parallel one. The key idea
                 is to use model checking, together with symbolic
                 execution, to establish the equivalence of the two
                 programs. In this approach the path condition from
                 symbolic execution of the sequential program is used to
                 constrain the search through the parallel program. To
                 handle floating-point operations, three different types
                 of equivalence are supported. Several examples are
                 presented, demonstrating the approach and actual errors
                 that were found. Limitations and directions for future
                 research are also described.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "concurrency; Finite-state verification;
                 floating-point; high performance computing; Message
                 Passing Interface; model checking; MPI; numerical
                 program; parallel programming; Spin; symbolic
                 execution",
}

@Article{Tiwana:2008:ICD,
  author =       "Amrit Tiwana",
  title =        "Impact of classes of development coordination tools on
                 software development performance: a multinational
                 empirical study",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "2",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1348250.1348257",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Jun 16 11:13:13 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Although a diverse variety of software development
                 coordination tools are widely used in practice,
                 considerable debate surrounds their impact on software
                 development performance. No large-scale field research
                 has systematically examined their impact on software
                 development performance. This paper reports the results
                 of a multinational field study of software projects in
                 209 software development organizations to empirically
                 examine the influence of six key classes of development
                 coordination tools on the efficiency (reduction of
                 development rework, budget compliance) and
                 effectiveness (defect reduction) of software
                 development performance.\par

                 Based on an in-depth field study, the article
                 conceptualizes six holistic classes of development
                 coordination tools. The results provide nuanced
                 insights---some counter to prevailing beliefs---into
                 the relationships between the use of various classes of
                 development coordination tools and software development
                 performance. The overarching finding is that the
                 performance benefits of development coordination tools
                 are contingent on the salient types of novelty in a
                 project. The dimension of development
                 performance---efficiency or effectiveness---that each
                 class of tools is associated with varies systematically
                 with whether a project involves conceptual novelty,
                 process novelty, multidimensional novelty (both process
                 and conceptual novelty), or neither. Another noteworthy
                 insight is that the use of some classes of tools
                 introduces an efficiency-effectiveness tradeoff.
                 Collectively, the findings are among the first to offer
                 empirical support for the varied performance impacts of
                 various classes of development coordination tools and
                 have important implications for software development
                 practice. The paper also identifies several promising
                 areas for future research.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "collaborative software engineering. Software
                 outsourcing; coordination; development coordination
                 tools; development tools; efficiency effectiveness
                 tradeoff; empirical study; field study; knowledge
                 integration; knowledge management; outsourcing; project
                 management; regression analysis; Software development",
}

@Article{Notkin:2008:Eb,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "3",
  pages =        "12:1--12:??",
  month =        jun,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1363102.1363103",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 25 08:43:45 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Mohagheghi:2008:EIS,
  author =       "Parastoo Mohagheghi and Reidar Conradi",
  title =        "An empirical investigation of software reuse benefits
                 in a large telecom product",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "3",
  pages =        "13:1--13:??",
  month =        jun,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1363102.1363104",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 25 08:43:45 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "{\em Background}. This article describes a case study
                 on the benefits of software reuse in a large telecom
                 product. The reused components were developed in-house
                 and shared in a product-family approach. {\em Methods}.
                 Quantitative data mined from company repositories are
                 combined with other quantitative data and qualitative
                 observations. {\em Results}. We observed significantly
                 lower fault density and less modified code between
                 successive releases of the reused components. Reuse and
                 standardization of software architecture and processes
                 allowed easier transfer of development when
                 organizational changes happened. {\em Conclusions}. The
                 study adds to the evidence of quality benefits of
                 large-scale reuse programs and explores organizational
                 motivations and outcomes.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "fault density; product family; risks; software reuse;
                 standardization",
}

@Article{Henkel:2008:DDA,
  author =       "Johannes Henkel and Christoph Reichenbach and Amer
                 Diwan",
  title =        "Developing and debugging algebraic specifications for
                 {Java} classes",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1363102.1363105",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 25 08:43:45 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Modern programs make extensive use of reusable
                 software libraries. For example, a study of a number of
                 large Java applications shows that between 17\% and
                 30\% of the classes in those applications use container
                 classes defined in the {\tt java.util} package. Given
                 this extensive code reuse in Java programs, it is
                 important for the interfaces of reusable classes to be
                 well documented. An interface is well documented if it
                 satisfies the following requirements: (1) the
                 documentation completely describes how to use the
                 interface; (2) the documentation is clear; (3) the
                 documentation is unambiguous; and (4) any deviation
                 between the documentation and the code is machine
                 detectable. Unfortunately, documentation in natural
                 language, which is the norm, does not satisfy the above
                 requirements. Formal specifications can satisfy them
                 but they are difficult to develop, requiring
                 significant effort on the part of programmers.\par

                 To address the practical difficulties with formal
                 specifications, we describe and evaluate a tool to help
                 programmers write and debug algebraic specifications.
                 Given an algebraic specification of a class, our
                 interpreter generates a prototype that can be used
                 within an application like a regular Java class. When
                 running an application that uses the prototype, the
                 interpreter prints error messages that tell the
                 developer in which way the specification is incomplete
                 or inconsistent with a hand-coded implementation of the
                 class. We use case studies to demonstrate the
                 usefulness of our system.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "algebraic interpretation; algebraic specifications;
                 specification discovery",
}

@Article{Gencel:2008:FSM,
  author =       "Cigdem Gencel and Onur Demirors",
  title =        "Functional size measurement revisited",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1363102.1363106",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 25 08:43:45 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "There are various approaches to software size
                 measurement. Among these, the metrics and methods based
                 on measuring the functionality attribute have become
                 widely used since the original method was introduced in
                 1979. Although functional size measurement methods have
                  come a long way, they still pose challenges for
                 software managers. This article identifies improvement
                 opportunities based on empirical studies we performed
                 on ongoing projects. We also compare our findings with
                 the extended dataset provided by the International
                 Software Benchmarking Standards Group (ISBSG).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "COSMIC FFP; functional size measurement; MkII FPA;
                 software benchmarking; software estimation",
}

@Article{Chen:2008:UBS,
  author =       "Tsong Yueh Chen and Robert Merkel",
  title =        "An upper bound on software testing effectiveness",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1363102.1363107",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 25 08:43:45 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Failure patterns describe typical ways in which inputs
                 revealing program failure are distributed across the
                 input domain --- in many cases, clustered together in
                  contiguous regions. Based on these observations, several
                 debug testing methods have been developed. We examine
                 the upper bound of debug testing effectiveness
                 improvements possible through making assumptions about
                 the shape, size and orientation of failure patterns. We
                 consider the bounds for testing strategies with respect
                 to minimizing the F-measure, maximizing the P-measure,
                 and maximizing the E-measure. Surprisingly, we find
                 that the empirically measured effectiveness of some
                 existing methods that are not based on these
                 assumptions is close to the theoretical upper bound of
                 these strategies. The assumptions made to obtain the
                 upper bound, and its further implications, are also
                 examined.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "adaptive random testing; failure patterns;
                 failure-causing inputs; random testing; software
                 testing; testing effectiveness metrics",
}

@Article{Jalote:2008:PRR,
  author =       "Pankaj Jalote and Brendan Murphy and Vibhu Saujanya
                 Sharma",
  title =        "Post-release reliability growth in software products",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "4",
  pages =        "17:1--17:??",
  month =        aug,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/13487689.13487690",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Aug 20 14:07:07 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Most software reliability growth models work under the
                 assumption that reliability of software grows due to
                 the removal of bugs that cause failures. However,
                 another phenomenon has often been observed --- the
                 failure rate of a software product following its
                 release decreases with time even if no bugs are
                 corrected. In this article we present a simple model to
                 represent this phenomenon. We introduce the concept of
                 initial transient failure rate of the product and
                 assume that it decays with a factor $ \alpha $ per unit
                  time, thereby increasing the product reliability with
                 time. When the transient failure rate decays away, the
                 product displays a steady state failure rate. We
                 discuss how the parameters in this model --- initial
                 transient failure rate, decay factor, and steady state
                 failure rate --- can be determined from the failure and
                 sales data of a product. We also describe how, using
                 the model, we can determine the product stabilization
                 time --- a product quality metric that describes how
                 long it takes a product to reach close to its stable
                 failure rate. We provide many examples where this model
                 has been applied to data from released products.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "post-release reliability growth; product stabilization
                 time",
}

@Article{Robillard:2008:TAS,
  author =       "Martin P. Robillard",
  title =        "Topology analysis of software dependencies",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "4",
  pages =        "18:1--18:??",
  month =        aug,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/13487689.13487691",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Aug 20 14:07:07 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Before performing a modification task, a developer
                 usually has to investigate the source code of a system
                 to understand how to carry out the task. Discovering
                 the code relevant to a change task is costly because it
                 is a human activity whose success depends on a large
                 number of unpredictable factors, such as intuition and
                 luck. Although studies have shown that effective
                 developers tend to explore a program by following
                 structural dependencies, no methodology is available to
                 guide their navigation through the thousands of
                 dependency paths found in a nontrivial program. We
                 describe a technique to automatically propose and rank
                 program elements that are potentially interesting to a
                 developer investigating source code. Our technique is
                 based on an analysis of the topology of structural
                 dependencies in a program. It takes as input a set of
                 program elements of interest to a developer and
                 produces a fuzzy set describing other elements of
                 potential interest. Empirical evaluation of our
                 technique indicates that it can help developers quickly
                 select program elements worthy of investigation while
                 avoiding less interesting ones.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "feature location; program understanding; separation of
                 concerns; software change; software evolution; software
                 navigation; static analysis",
}

@Article{Emmerich:2008:IRD,
  author =       "Wolfgang Emmerich and Mikio Aoyama and Joe Sventek",
  title =        "The impact of research on the development of
                 middleware technology",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "4",
  pages =        "19:1--19:??",
  month =        aug,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/13487689.13487692",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Aug 20 14:07:07 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The middleware market represents a sizable segment of
                 the overall Information and Communication Technology
                 market. In 2005, the annual middleware license revenue
                 was reported by Gartner to be in the region of \$8.5
                  billion. In this article we address the question of
                  whether research had any involvement in the creation of
                  the technology that is being sold in this market,
                  attempting a scholarly discourse. We present the research
                 method that we have applied to answer this question. We
                 then present a brief introduction into the key
                 middleware concepts that provide the foundation for
                 this market. It would not be feasible to investigate
                  every possible impact that research might have had.
                  Instead, we select a few very successful technologies
                  that are representative of the middleware market as a
                  whole and show the impact of research results on the
                  creation of these technologies. We
                 investigate the origins of Web services middleware,
                 distributed transaction processing middleware,
                 message-oriented middleware, distributed object
                 middleware and remote procedure call systems. For each
                 of these technologies we are able to show ample
                 influence of research and conclude that without the
                 research conducted by PhD students and researchers in
                 university computer science labs at Brown, CMU,
                 Cambridge, Newcastle, MIT, Vrije, and University of
                 Washington as well as research in industrial labs at
                 APM, AT\&T Bell Labs, DEC Systems Research, HP Labs,
                 IBM Research, and Xerox PARC we would not have
                 middleware technology in its current form. We summarise
                 the article by distilling lessons that can be learnt
                  from this evidence of impact for future technology
                 transfer undertakings.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Gruschke:2008:ROF,
  author =       "Tanja M. Gruschke and Magne J{\o}rgensen",
  title =        "The role of outcome feedback in improving the
                 uncertainty assessment of software development effort
                 estimates",
  journal =      j-TOSEM,
  volume =       "17",
  number =       "4",
  pages =        "20:1--20:??",
  month =        aug,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/13487689.13487693",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Aug 20 14:07:07 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Previous studies report that software developers are
                 over-confident in the accuracy of their effort
                 estimates. Aim: This study investigates the role of
                 outcome feedback, that is, feedback about the
                 discrepancy between the estimated and the actual
                 effort, in improving the uncertainty assessments.
                 Method: We conducted two in-depth empirical studies on
                 uncertainty assessment learning. Study 1 included five
                 student developers and Study 2, 10 software
                 professionals. In each study the developers repeatedly
                 assessed the uncertainty of their effort estimates of a
                 programming task, solved the task, and received
                 estimation accuracy outcome feedback. Results: We found
                 that most, but not all, developers were initially
                 over-confident in the accuracy of their effort
                 estimates and remained over-confident in spite of
                 repeated and timely outcome feedback. One important,
                 but not sufficient, condition for improvement based on
                 outcome feedback seems to be the use of explicitly
                 formulated, instead of purely intuition-based,
                 uncertainty assessment strategies.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "cost estimation; effort prediction intervals;
                 judgment-based uncertainty assessment; overconfidence;
                 software cost estimation; software development
                 management",
}

@Article{Karam:2008:ULT,
  author =       "Marcel R. Karam and Trevor J. Smedley and Sergiu M.
                 Dascalu",
  title =        "Unit-level test adequacy criteria for visual dataflow
                 languages and a testing methodology",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "1",
  pages =        "1:1--1:??",
  month =        sep,
  year =         "2008",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Oct 6 15:14:19 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Visual dataflow languages (VDFLs), which include
                 commercial and research systems, have had a substantial
                  impact on end-user programming. Like programs in any
                  other programming language, whether visual or textual,
                  VDFL programs often contain faults. A desire to provide
                  programmers
                 of these languages with some of the benefits of
                 traditional testing methodologies has been the driving
                 force behind our effort in this work. In this article
                 we introduce, in the context of Prograph, a testing
                 methodology for VDFLs based on structural test adequacy
                 criteria and coverage. This article also reports on the
                 results of two empirical studies. The first study was
                 conducted to obtain meaningful information about, in
                  particular, the effectiveness of our all-DUs criteria
                 in detecting a reasonable percentage of faults in
                 VDFLs. The second study was conducted to evaluate,
                 under the same criterion, the effectiveness of our
                 methodology in assisting users to visually localize
                 faults by reducing their search space. Both studies
                 were conducted using a testing system that we have
                 implemented in Prograph's IDE.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Louridas:2008:PLS,
  author =       "Panagiotis Louridas and Diomidis Spinellis and
                 Vasileios Vlachos",
  title =        "Power laws in software",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "1",
  pages =        "2:1--2:??",
  month =        sep,
  year =         "2008",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Oct 6 15:14:19 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "A single statistical framework, comprising power law
                 distributions and scale-free networks, seems to fit a
                 wide variety of phenomena. There is evidence that power
                 laws appear in software at the class and function
                 level. We show that distributions with long, fat tails
                 in software are much more pervasive than previously
                 established, appearing at various levels of
                 abstraction, in diverse systems and languages. The
                 implications of this phenomenon cover various aspects
                 of software engineering research and practice.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Lhotak:2008:EBC,
  author =       "Ond{\v{r}}ej Lhot{\'a}k and Laurie Hendren",
  title =        "Evaluating the benefits of context-sensitive points-to
                 analysis using a {BDD}-based implementation",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "1",
  pages =        "3:1--3:??",
  month =        sep,
  year =         "2008",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Oct 6 15:14:19 MDT 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We present Paddle, a framework of BDD-based
                 context-sensitive points-to and call graph analyses for
                 Java, as well as client analyses that use their
                 results. Paddle supports several variations of
                 context-sensitive analyses, including call site strings
                 and object sensitivity, and context-sensitively
                 specializes both pointer variables and the heap
                 abstraction. We empirically evaluate the precision of
                 these context-sensitive analyses on significant Java
                 programs. We find that object-sensitive analyses are
                 more precise than comparable variations of the other
                 approaches, and that specializing the heap abstraction
                 improves precision more than extending the length of
                 context strings.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Memon:2008:ARE,
  author =       "Atif M. Memon",
  title =        "Automatically repairing event sequence-based {GUI}
                 test suites for regression testing",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "2",
  pages =        "4:1--4:??",
  month =        nov,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1416563.1416564",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Nov 11 15:45:20 MST 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Although graphical user interfaces (GUIs) constitute a
                 large part of the software being developed today and
                 are typically created using rapid prototyping, there
                 are no effective regression testing techniques for
                 GUIs. The needs of GUI regression testing differ from
                 those of traditional software. When the structure of a
                 GUI is modified, test cases from the original GUI's
                 suite are either reusable or unusable on the modified
                 GUI. Because GUI test case generation is expensive, our
                 goal is to make the unusable test cases usable, thereby
                 helping to retain the suite's event coverage. The idea
                 of reusing these unusable ({\em obsolete\/}) test cases
                 has not been explored before. This article shows that a
                 large number of test cases become unusable for GUIs. It
                 presents a new GUI regression testing technique that
                 first automatically determines the usable and unusable
                 test cases from a test suite after a GUI modification,
                 then determines the unusable test cases that can be
                 repaired so that they can execute on the modified GUI,
                 and finally uses {\em repairing transformations\/} to
                 repair the test cases. This regression testing
                 technique along with four repairing transformations has
                 been implemented. An empirical study for four
                 open-source applications demonstrates that (1) this
                 approach is effective in that many of the test cases
                 can be repaired, and is practical in terms of its time
                 performance, (2) certain types of test cases are more
                 prone to becoming unusable, and (3) certain types of
                 ``dominator'' events, when modified, make a large
                 number of test cases unusable.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Graphical user interfaces; regression testing;
                 repairing test cases; test case management; test
                 maintenance",
}

@Article{Breaux:2008:SPP,
  author =       "Travis D. Breaux and Annie I. Ant{\'o}n and Jon
                 Doyle",
  title =        "Semantic parameterization: a process for modeling
                 domain descriptions",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "2",
  pages =        "5:1--5:??",
  month =        nov,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1416563.1416565",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Nov 11 15:45:20 MST 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Software engineers must systematically account for the
                 broad scope of environmental behavior, including
                 nonfunctional requirements, intended to coordinate the
                 actions of stakeholders and software systems. The
                 Inquiry Cycle Model (ICM) provides engineers with a
                 strategy to acquire and refine these requirements by
                 having domain experts answer six questions: who, what,
                 where, when, how, and why. Goal-based requirements
                 engineering has led to the formalization of
                 requirements to answer the ICM questions about {\em
                 when}, {\em how}, and {\em why\/} goals are achieved,
                 maintained, or avoided. In this article, we present a
                 systematic process called {\em Semantic
                 Parameterization\/} for expressing natural language
                 domain descriptions of goals as specifications in
                 description logic. The formalization of goals in
                 description logic allows engineers to automate
                 inquiries using {\em who}, {\em what}, and {\em
                 where\/} questions, completing the formalization of the
                  ICM questions. The contributions of this approach
                  include new theory for conceptually comparing and
                  disambiguating goal specifications, which enables
                  querying goals and organizing them into specialization
                  hierarchies. The artifacts in the process include a
                 dictionary that aligns the domain lexicon with unique
                 concepts, distinguishing between synonyms and
                 polysemes, and several natural language patterns that
                 aid engineers in mapping common domain descriptions to
                 formal specifications. Semantic Parameterization has
                 been empirically validated in three case studies on
                 policy and regulatory descriptions that govern
                 information systems in the finance and health-care
                 domains.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "description logic; domain knowledge; formal
                 specification; Natural language",
}

@Article{Huang:2008:DSL,
  author =       "Shan Shan Huang and David Zook and Yannis
                 Smaragdakis",
  title =        "Domain-specific languages and program generation with
                 {meta-AspectJ}",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "2",
  pages =        "6:1--6:??",
  month =        nov,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1416563.1416566",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Nov 11 15:45:20 MST 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Meta-AspectJ (MAJ) is a language for generating
                 AspectJ programs using code templates. MAJ itself is an
                 extension of Java, so users can interleave arbitrary
                 Java code with AspectJ code templates. MAJ is a
                 structured metaprogramming tool: a well-typed generator
                 implies a syntactically correct generated program. MAJ
                 promotes a methodology that combines aspect-oriented
                 and generative programming. A valuable application is
                 in implementing small domain-specific language
                 extensions as generators using unobtrusive annotations
                 for syntax extension and AspectJ as a back-end. The
                 advantages of this approach are twofold. First, the
                 generator integrates into an existing software
                 application much as a regular API or library, instead
                 of as a language extension. Second, a mature language
                 implementation is easy to achieve with little effort
                 since AspectJ takes care of the low-level issues of
                 interfacing with the base Java language.\par

                 In addition to its practical value, MAJ offers valuable
                 insights to metaprogramming tool designers. It is a
                 mature metaprogramming tool for AspectJ (and, by
                 extension, Java): a lot of emphasis has been placed on
                 context-sensitive parsing and error reporting. As a
                 result, MAJ minimizes the number of metaprogramming
                 (quote/unquote) operators and uses type inference to
                 reduce the need to remember type names for syntactic
                 entities.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "domain-specific languages; language extensions;
                 Metaprogramming; program synthesis; program
                 transformation; program verification",
}

@Article{Xie:2008:UPS,
  author =       "Qing Xie and Atif M. Memon",
  title =        "Using a pilot study to derive a {GUI} model for
                 automated testing",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "2",
  pages =        "7:1--7:??",
  month =        nov,
  year =         "2008",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1416563.1416567",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Nov 11 15:45:20 MST 2008",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Graphical user interfaces (GUIs) are one of the most
                 commonly used parts of today's software. Despite their
                 ubiquity, testing GUIs for functional correctness
                 remains an understudied area. A typical GUI gives many
                 degrees of freedom to an end-user, leading to an
                 enormous {\em input event interaction space\/} that
                 needs to be tested. GUI test designers generate and
                 execute test cases (modeled as sequences of user {\em
                 events\/}) to traverse parts of this space; targeting
                 a subspace in order to maximize fault detection is a
                 nontrivial task. In this vein, in previous work, we
                 used informal GUI code examination and personal
                 intuition to develop an {\em event-interaction
                 graph\/} (EIG). In this
                 article we empirically derive the EIG model via a pilot
                 study, and the resulting EIG validates our intuition
                 used in previous work; the empirical derivation process
                 also allows for model evolution as our understanding of
                 GUI faults improves. Results of the pilot study show
                 that events interact in complex ways; a GUI's response
                 to an event may vary depending on the {\em context\/}
                 established by preceding events and their execution
                 order. The EIG model helps testers to understand the
                 nature of interactions between GUI events when executed
                 in test cases and why certain events detect faults, so
                 that they can better traverse the event space. New test
                 adequacy criteria are defined for the EIG; new
                 algorithms use these criteria and the EIG to
                 systematically
                 generate test cases that are shown to be effective on
                 four fielded open-source applications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Graphical user interfaces; model-based testing; test
                 minimization; test suite management",
}

@Article{Notkin:2009:E,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "3",
  pages =        "8:1--8:??",
  month =        may,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1525880.1525881",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 3 16:34:58 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Bauer:2009:CER,
  author =       "Lujo Bauer and Jay Ligatti and David Walker",
  title =        "Composing expressive runtime security policies",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "3",
  pages =        "9:1--9:??",
  month =        may,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1525880.1525882",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 3 16:34:58 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Program monitors enforce security policies by
                 interposing themselves into the control flow of
                 untrusted software whenever that software attempts to
                 execute security-relevant actions. At the point of
                 interposition, a monitor has authority to permit or
                 deny (perhaps conditionally) the untrusted software's
                 attempted action. Program monitors are common security
                 enforcement mechanisms and integral parts of operating
                 systems, virtual machines, firewalls, network auditors,
                 and antivirus and antispyware tools.\par

                 Unfortunately, the runtime policies we require program
                 monitors to enforce grow more complex, both as the
                 monitored software is given new capabilities and as
                 policies are refined in response to attacks and user
                 feedback. We propose dealing with policy complexity by
                 organizing policies in such a way as to make them
                 composable, so that complex policies can be specified
                 more simply as compositions of smaller subpolicy
                 modules. We present a fully implemented language and
                 system called Polymer that allows security engineers to
                 specify and enforce composable policies on Java
                 applications. We formalize the central workings of
                 Polymer by defining an unambiguous semantics for our
                 language. Using this formalization, we state and prove
                 an uncircumventability theorem which guarantees that
                 monitors will intercept all security-relevant actions
                 of untrusted software.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Policy composition; policy enforcement;
                 policy-specification language",
}

@Article{Hall:2009:SRT,
  author =       "Tracy Hall and Nathan Baddoo and Sarah Beecham and
                 Hugh Robinson and Helen Sharp",
  title =        "A systematic review of theory use in studies
                 investigating the motivations of software engineers",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "3",
  pages =        "10:1--10:??",
  month =        may,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1525880.1525883",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 3 16:34:58 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Motivated software engineers make a critical
                 contribution to delivering successful software systems.
                 Understanding the motivations of software engineers and
                 the impact of motivation on software engineering
                 outcomes could significantly affect the industry's
                 ability to deliver good quality software systems.
                 Understanding the motivations of people generally in
                 relation to their work is underpinned by eight classic
                 motivation theories from the social sciences. We would
                 expect these classic motivation theories to play an
                 important role in developing a rigorous understanding
                 of the specific motivations of software engineers. In
                 this article we investigate how this theoretical basis
                 has been exploited in previous studies of software
                 engineering. We analyzed 92 studies of motivation in
                 software engineering published between 1980 and
                 2006. Our main findings are
                 that many studies of software engineers' motivations
                 are not explicitly underpinned by reference to the
                 classic motivation theories. Furthermore, the findings
                 presented in these studies are often not explicitly
                 interpreted in terms of those theories, despite the
                 fact that in many cases there is a relationship between
                 those findings and the theories. Our conclusion is that
                 although there has been a great deal of previous work
                 looking at motivation in software engineering, the lack
                 of reference to classic theories of motivation means
                 that the current body of work in the area is weakened,
                 and our understanding of motivation in software
                 engineering is not as rigorous as it may at first
                 appear. This weakness in the current state of knowledge
                 highlights important areas for future researchers to
                 contribute towards developing a rigorous and usable
                 body of knowledge in motivating software engineers.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Motivation; software engineering",
}

@Article{McMinn:2009:EEN,
  author =       "Phil McMinn and David Binkley and Mark Harman",
  title =        "Empirical evaluation of a nesting testability
                 transformation for evolutionary testing",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "3",
  pages =        "11:1--11:??",
  month =        may,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1525880.1525884",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 3 16:34:58 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Evolutionary testing is an approach to automating test
                 data generation that uses an evolutionary algorithm to
                 search a test object's input domain for test data.
                 Nested predicates can cause problems for evolutionary
                 testing, because information needed for guiding the
                 search only becomes available as each nested
                 conditional is satisfied. This means that the search
                 process can overfit to early information, making it
                 harder, and sometimes nearly impossible, to satisfy
                 constraints that only become apparent later in the
                 search. The article presents a testability
                 transformation that allows the evaluation of all nested
                 conditionals at once. Two empirical studies are
                 presented. The first study shows that the form of
                 nesting handled is prevalent in practice. The second
                 study shows how the approach improves evolutionary test
                 data generation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Evolutionary testing; search-based software
                 engineering; test data generation; testability
                 transformation",
}

@Article{Hamlet:2009:TES,
  author =       "Dick Hamlet",
  title =        "Tools and experiments supporting a testing-based
                 theory of component composition",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "3",
  pages =        "12:1--12:??",
  month =        may,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1525880.1525885",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Jun 3 16:34:58 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Development of software using off-the-shelf components
                 seems to offer a chance for improving product quality
                 and developer productivity. This article reviews a
                 foundational testing-based theory of component
                 composition, describes tools that implement the theory,
                 and presents experiments with functional and
                 nonfunctional component/system properties that validate
                 the theory and illuminate issues in component
                 composition.\par

                 The context for this work is an ideal form of
                 Component-Based Software Development (CBSD) supported
                 by tools. Component developers describe their
                 components by measuring approximations to functional
                 and nonfunctional behavior on a finite collection of
                 subdomains. Systems designers describe an
                 application-system structure by the component
                 connections that form it. From measured component
                 descriptions and a system structure, a CAD tool
                 synthesizes the system properties, predicting how the
                 system will behave. The system is not built, nor are
                 any test executions performed. Neither the component
                 sources nor executables are needed by systems
                 designers. From CAD calculations a designer can learn
                 (approximately) anything that could be learned by
                 testing an actual system implementation. Running the
                 CAD tool is often more efficient than assembling and
                 executing an actual system.\par

                 Using tools that support an ideal separation between
                 component and system development, experiments were
                 conducted to investigate two related questions: (1) To
                 what extent can unit (that is, component) testing
                 replace system testing? (2) What properties of software
                 and subdomains influence the quality of subdomain
                 testing?",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "CAD tool support for CBSD; component-based software
                 development (CBSD); experiments with composition of
                 software components; synthesis of system properties",
}

@Article{Goel:2009:IPC,
  author =       "Ankit Goel and Abhik Roychoudhury and P. S.
                 Thiagarajan",
  title =        "Interacting process classes",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "4",
  pages =        "13:1--13:??",
  month =        jul,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1538942.1538943",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 11 19:18:32 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Many reactive control systems consist of classes of
                 active objects involving both intraclass interactions
                 (i.e., objects belonging to the same class interacting
                 with each other) and interclass interactions. Such
                 reactive control systems appear in domains such as
                 telecommunication, transportation and avionics. In this
                 article, we propose a modeling and simulation technique
                 for interacting process classes. Our modeling style
                 uses standard notations to capture behavior. In
                 particular, the control flow of a process class is
                 captured by a labeled transition system, unit
                 interactions between process objects are described as
                 {\em transactions}, and the structural relations are
                 captured via class diagrams. The key feature of our
                 approach is that our execution semantics leads to an
                 {\em abstract\/} simulation technique which involves
                 (i) grouping together active objects into equivalence
                 classes according to their potential futures, and (ii)
                 keeping track of the number of objects in an
                 equivalence class rather than their identities. Our
                 simulation strategy is both time- and memory-efficient,
                 and we demonstrate this on well-studied nontrivial
                 examples of reactive systems. We also present a case
                 study involving a weather-update controller from NASA
                 to demonstrate the use of our simulator for debugging
                 realistic designs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Abstract execution; active objects; message sequence
                 charts; Unified Modeling Language (UML)",
}

@Article{Hierons:2009:VFT,
  author =       "Robert M. Hierons",
  title =        "Verdict functions in testing with a fault domain or
                 test hypotheses",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "4",
  pages =        "14:1--14:??",
  month =        jul,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1538942.1538944",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 11 19:18:32 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In state-based testing, it is common to include
                 verdicts within test cases, the result of the test case
                 being the verdict reached by the test run. In addition,
                 approaches that reason about test effectiveness or
                 produce tests that are guaranteed to find certain
                 classes of faults are often based on either a fault
                 domain or a set of test hypotheses. This article
                 considers how the presence of a fault domain or test
                 hypotheses affects our notion of a test verdict. The
                 analysis reveals the need for new verdicts that provide
                 more information than the current verdicts and for
                 verdict functions that return a verdict based on a set
                 of test runs rather than a single test run. The
                 concepts are illustrated in the contexts of testing
                 from a nondeterministic finite state machine and the
                 testing of a datatype specified using an algebraic
                 specification language but are potentially relevant
                 whenever fault domains or test hypotheses are used.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "fault domains; test hypotheses; Test verdicts",
}

@Article{Mamei:2009:PPM,
  author =       "Marco Mamei and Franco Zambonelli",
  title =        "Programming pervasive and mobile computing
                 applications: {The TOTA} approach",
  journal =      j-TOSEM,
  volume =       "18",
  number =       "4",
  pages =        "15:1--15:??",
  month =        jul,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1538942.1538945",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 11 19:18:32 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Pervasive and mobile computing call for suitable
                 middleware and programming models to support the
                 activities of complex software systems in dynamic
                 network environments. In this article we present TOTA
                 (``Tuples On The Air''), a novel middleware and
                 programming approach for supporting adaptive
                 context-aware activities in pervasive and mobile
                 computing scenarios. The key idea in TOTA is to rely on
                 spatially distributed tuples, adaptively propagated
                 across a network on the basis of application-specific
                 rules, for both representing contextual information and
                 supporting uncoupled interactions between application
                 components. TOTA promotes a simple way of programming
                 that facilitates access to distributed information,
                 navigation in complex environments, and the achievement
                 of complex coordination tasks in a fully distributed
                 and adaptive way, mostly freeing programmers and system
                 managers from the need to take care of low-level issues
                 related to network dynamics. This article includes both
                 application examples to clarify concepts and
                 performance figures to show the feasibility of the
                 approach",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "coordination; middleware; mobile computing; Pervasive
                 computing; self-adaptation; self-organization; tuple
                 spaces",
}

@Article{Tilevich:2009:JOE,
  author =       "Eli Tilevich and Yannis Smaragdakis",
  title =        "{J-Orchestra}: {Enhancing} {Java} programs with
                 distribution capabilities",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "1",
  pages =        "1:1--1:??",
  month =        aug,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1555392.1555394",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 11 19:18:39 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "J-Orchestra is a system that enhances centralized Java
                 programs with distribution capabilities. Operating at
                 the bytecode level, J-Orchestra transforms a
                 centralized Java program (i.e., running on a single
                 Java Virtual Machine (JVM)) into a distributed one
                 (i.e., running across multiple JVMs). This
                 transformation effectively separates distribution
                 concerns from the core functionality of a program.
                 J-Orchestra follows a semiautomatic transformation
                 process. Through a GUI, the user selects program
                 elements (at class granularity) and assigns them to
                 network locations. Based on the user's input, the
                 J-Orchestra backend {\em automatically partitions\/}
                 the program through compiler-level techniques, without
                 changes to the JVM or to the Java Runtime Environment
                 (JRE) classes. By means of bytecode engineering and
                 code generation, J-Orchestra replaces method calls
                 with remote method calls, direct object references
                 with proxy references, and so on. It also translates
                 Java language features (e.g., static methods and
                 fields, inheritance, inner classes, and new object
                 construction) for efficient distributed
                 execution.\par

                 We detail the main technical issues that J-Orchestra
                 addresses, including its mechanism for program
                 transformation in the presence of unmodifiable code
                 (e.g., in JRE classes) and the translation of
                 concurrency and synchronization constructs to work
                 correctly over the network. We further discuss a case
                 study of transforming a large, commercial, third-party
                 application for efficient execution in a client-server
                 environment and outline the architectural
                 characteristics of centralized programs that are
                 amenable to automated distribution with J-Orchestra.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "bytecode engineering; distributed computing; Java;
                 middleware; RMI; Separation of concerns",
}

@Article{Ouyang:2009:BPM,
  author =       "Chun Ouyang and Marlon Dumas and Wil M. P. {Van Der
                 Aalst} and Arthur H. M. {Ter Hofstede} and Jan
                 Mendling",
  title =        "From business process models to process-oriented
                 software systems",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "1",
  pages =        "2:1--2:??",
  month =        aug,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1555392.1555395",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 11 19:18:39 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Several methods for enterprise systems analysis rely
                 on flow-oriented representations of business
                 operations, otherwise known as business process models.
                 The Business Process Modeling Notation (BPMN) is a
                 standard for capturing such models. BPMN models
                 facilitate communication between domain experts and
                 analysts and provide input to software development
                 projects. Meanwhile, methods for enterprise software
                 development are emerging that rely on detailed
                 process definitions executed by process
                 engines. These process definitions refine their
                 counterpart BPMN models by introducing data
                 manipulation, application binding, and other
                 implementation details. The de facto standard for
                 defining executable processes is the Business Process
                 Execution Language (BPEL). Accordingly, a
                 standards-based method for developing process-oriented
                 systems is to start with BPMN models and to translate
                 these models into BPEL definitions for subsequent
                 refinement. However, instrumenting this method is
                 challenging because BPMN models and BPEL definitions
                 are structurally very different. Existing techniques
                 for translating BPMN to BPEL only work for limited
                 classes of BPMN models. This article proposes a
                 translation technique that does not impose structural
                 restrictions on the source BPMN model. At the same
                 time, the technique emphasizes the generation of
                 readable (block-structured) BPEL code. An empirical
                 evaluation conducted over a large collection of process
                 models shows that the resulting BPEL definitions are
                 largely block-structured. Beyond its direct relevance
                 in the context of BPMN and BPEL, the technique
                 presented in this article addresses issues that arise
                 when translating from graph-oriented to
                 block-structured flow definition languages.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "BPEL; BPMN; Business process modeling; Web services",
}

@Article{Rajan:2009:UAO,
  author =       "Hridesh Rajan and Kevin J. Sullivan",
  title =        "Unifying aspect- and object-oriented design",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "1",
  pages =        "3:1--3:??",
  month =        aug,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1555392.1555396",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 11 19:18:39 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The contribution of this work is the design and
                 evaluation of a programming language model that unifies
                 aspects and classes as they appear in AspectJ-like
                 languages. We show that our model preserves the
                 capabilities of AspectJ-like languages, while improving
                 the conceptual integrity of the language model and the
                 compositionality of modules. The improvement in
                 conceptual integrity is manifested by the reduction of
                 specialized constructs in favor of uniform orthogonal
                 constructs. The enhancement in compositionality is
                 demonstrated by better modularization of integration
                 and higher-order crosscutting concerns.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "aspect-oriented programming; binding; Classpect; Eos;
                 first class aspect instances; instance-level advising;
                 unified aspect language model",
}

@Article{Tan:2009:CDM,
  author =       "Hee Beng Kuan Tan and Yuan Zhao and Hongyu Zhang",
  title =        "Conceptual data model-based software size estimation
                 for information systems",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "2",
  pages =        "4:1--4:??",
  month =        oct,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1571629.1571630",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 9 20:39:35 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Size estimation plays a key role in effort estimation
                 that has a crucial impact on software projects in the
                 software industry. Some information required by
                 existing software sizing methods is difficult to
                 predict in the early stage of software development. A
                 conceptual data model is widely used in the early stage
                 of requirements analysis for information systems. Lines
                 of code (LOC) is a commonly used software size measure.
                 This article proposes a novel LOC estimation method
                 for information systems based on their conceptual data
                 models, using a multiple linear regression model. We
                 have validated the proposed method using samples from
                 both the software industry and open-source systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "conceptual data model; line of code (LOC); multiple
                 linear regression model; Software sizing",
}

@Article{Masri:2009:MSI,
  author =       "Wes Masri and Andy Podgurski",
  title =        "Measuring the strength of information flows in
                 programs",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "2",
  pages =        "5:1--5:??",
  month =        oct,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1571629.1571631",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 9 20:39:35 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "{\em Dynamic information flow analysis\/} (DIFA) was
                 devised to enable the flow of information among
                 variables in an executing program to be monitored and
                 possibly regulated. It is related to techniques like
                 {\em dynamic slicing\/} and {\em dynamic impact
                 analysis}. To better understand the basis for DIFA, we
                 conducted an empirical study in which we measured the
                 {\em strength\/} of information flows identified by
                 DIFA, using information theoretic and correlation-based
                 methods. The results indicate that in most cases the
                 occurrence of a chain of dynamic program dependences
                 between two variables does {\em not\/} indicate a
                 measurable information flow between them. We also
                 explored the relationship between the strength of an
                 information flow and the {\em length\/} of the
                 corresponding dependence chain, and we obtained results
                 indicating that no consistent relationship exists
                 between the length of an information flow and its
                 strength. Finally, we investigated whether data
                 dependence and control dependence make equal or
                 unequal contributions to flow strength. The results
                 indicate that flows due to data dependences alone are
                 stronger, on average, than flows due to control
                 dependences alone. We present the details of our study
                 and consider the implications of the results for
                 applications of DIFA and related techniques.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "correlation; Dynamic information flow analysis;
                 dynamic slicing; entropy; information flow length;
                 information flow strength; information leakage; program
                 dependence",
}

@Article{Desai:2009:AMM,
  author =       "Nirmit Desai and Amit K. Chopra and Munindar P.
                 Singh",
  title =        "{Amoeba}: a methodology for modeling and evolving
                 cross-organizational business processes",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "2",
  pages =        "6:1--6:??",
  month =        oct,
  year =         "2009",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1571629.1571632",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Oct 9 20:39:35 MDT 2009",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Business service engagements involve processes that
                 extend across two or more autonomous organizations.
                 For regulatory and competitive reasons,
                 requirements for cross-organizational business
                 processes often evolve in subtle ways. The changes may
                 concern the business transactions supported by a
                 process, the organizational structure of the parties
                 participating in the process, or the contextual
                 policies that apply to the process. Current business
                 process modeling approaches handle such changes in an
                 ad hoc manner, and lack a principled means for
                 determining what needs to be changed and where.
                 Cross-organizational settings exacerbate the
                 shortcomings of traditional approaches because changes
                 in one organization can potentially affect the workings
                 of another.\par

                 This article describes Amoeba, a methodology for
                 business processes that is based on {\em business
                 protocols}. Protocols capture the business meaning of
                 interactions among autonomous parties via commitments.
                 Amoeba includes guidelines for (1) specifying
                 cross-organizational processes using business
                 protocols, and (2) handling the evolution of
                 requirements via a novel application of protocol
                 composition. This article evaluates Amoeba using
                 enhancements of a real-life business scenario of
                 auto-insurance claim processing, and an aerospace case
                 study.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Business process modeling; business protocols;
                 requirements evolution",
}

@Article{Notkin:2010:E,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "3",
  pages =        "7:1--7:??",
  month =        jan,
  year =         "2010",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 15 13:32:11 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Whittle:2010:SHS,
  author =       "Jon Whittle and Praveen K. Jayaraman",
  title =        "Synthesizing hierarchical state machines from
                 expressive scenario descriptions",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "3",
  pages =        "8:1--8:??",
  month =        jan,
  year =         "2010",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 15 13:32:11 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Xu:2010:PCC,
  author =       "Chang Xu and S. C. Cheung and W. K. Chan and Chunyang
                 Ye",
  title =        "Partial constraint checking for context consistency in
                 pervasive computing",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "3",
  pages =        "9:1--9:??",
  month =        jan,
  year =         "2010",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 15 13:32:11 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Brogi:2010:DIS,
  author =       "Antonio Brogi and Razvan Popescu and Matteo Tanca",
  title =        "Design and implementation of {Sator}: a {Web} service
                 aggregator",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "3",
  pages =        "10:1--10:??",
  month =        jan,
  year =         "2010",
  CODEN =        "ATSMER",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 15 13:32:11 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Ponge:2010:AAT,
  author =       "Julien Ponge and Boualem Benatallah and Fabio Casati
                 and Farouk Toumani",
  title =        "Analysis and applications of timed service protocols",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "4",
  pages =        "11:1--11:??",
  month =        apr,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1734229.1734230",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 21 11:41:14 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Web services are increasingly gaining acceptance as a
                 framework for facilitating application-to-application
                 interactions within and across enterprises. It is
                 commonly accepted that a service description should
                 include not only the interface, but also the business
                 protocol supported by the service. The present work
                 focuses on the formalization of an important category
                 of protocols that includes time-related constraints
                 (called {\em timed protocols\/}), and the impact of
                 time on compatibility and replaceability analysis. We
                 formalized the following timing constraints: C-Invoke
                 constraints define time windows within which a service
                 operation can be invoked, while M-Invoke constraints
                 define expiration deadlines. We extended techniques for
                 compatibility and replaceability analysis between timed
                 protocols by using a semantic-preserving mapping
                 between timed protocols and timed automata, leading to
                 the identification of a novel class of timed automata,
                 called {\em protocol timed automata\/} (PTA). PTA
                 exhibit a particular kind of silent transition that
                 strictly increases the expressiveness of the model, yet
                 they are closed under complementation, making every
                 type of compatibility or replaceability analysis
                 decidable. Finally, we implemented our approach in the
                 context of a larger project called ServiceMosaic, a
                 model-driven framework for Web service life-cycle
                 management.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "compatibility and replaceability analysis; timed
                 automata; timed business protocols; Web services",
}

@Article{Payton:2010:SSA,
  author =       "Jamie Payton and Christine Julien and Gruia-Catalin
                 Roman and Vasanth Rajamani",
  title =        "Semantic self-assessment of query results in dynamic
                 environments",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "4",
  pages =        "12:1--12:??",
  month =        apr,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1734229.1734231",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 21 11:41:14 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Queries are convenient abstractions for the discovery
                 of information and services, as they offer
                 content-based information access. In distributed
                 settings, query semantics are well defined; for
                 example, queries are often designed to satisfy ACID
                 transactional properties. When query processing is
                 introduced in a dynamic network setting, achieving
                 transactional semantics becomes complex due to the open
                 and unpredictable environment. In this article, we
                 propose a query processing model for mobile ad hoc and
                 sensor networks that is suitable for expressing a wide
                 range of query semantics; the semantics differ in the
                 degree of consistency with which query results reflect
                 the state of the environment during query execution. We
                 introduce several distinct notions of consistency and
                 formally express them in our model. A practical and
                 significant contribution of this article is a protocol
                 for query processing that automatically assesses and
                 adaptively provides an achievable degree of consistency
                 given the operational environment throughout its
                 execution. The protocol attaches an assessment of the
                 achieved guarantee to returned query results, allowing
                 precise reasoning about a query with a range of
                 possible semantics. We evaluate the performance of this
                 protocol and demonstrate the benefits accrued to
                 applications through examples drawn from an industrial
                 application.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "consistency; mobile computing; Query processing",
}

@Article{Chen:2010:VSI,
  author =       "Chunqing Chen and Jin Song Dong and Jun Sun and Andrew
                 Martin",
  title =        "A verification system for interval-based specification
                 languages",
  journal =      j-TOSEM,
  volume =       "19",
  number =       "4",
  pages =        "13:1--13:??",
  month =        apr,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1734229.1734232",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 21 11:41:14 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Interval-based specification languages have been used
                 to formally model and rigorously reason about real-time
                 computing systems. This usually involves logical
                 reasoning and mathematical computation with respect to
                 continuous or discrete time. When these systems are
                 complex, analyzing their models by hand becomes
                 error-prone and difficult. In this article, we develop
                 a verification system to facilitate the formal analysis
                 of interval-based specification languages with
                 machine-assisted proof support. The verification system
                 is developed using a generic theorem prover, Prototype
                 Verification System (PVS). Our system elaborately
                 encodes a highly expressive set-based notation, Timed
                 Interval Calculus (TIC), and can rigorously carry out
                 the verification of TIC models at an interval level. We
                 validated all TIC reasoning rules and discovered subtle
                 flaws in the original rules. We also apply TIC to model
                 Duration Calculus (DC), which is a popular
                 interval-based specification language, and thus expand
                 the capacity of the verification system. We can check
                 the correctness of DC axioms, and execute DC proofs in
                 a manner similar to the corresponding pencil-and-paper
                 DC arguments.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Formal specification languages; real-time systems;
                 theorem proving",
}

@Article{Steimann:2010:TMI,
  author =       "Friedrich Steimann and Thomas Pawlitzki and Sven Apel
                 and Christian K{\"a}stner",
  title =        "Types and modularity for implicit invocation with
                 implicit announcement",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "1",
  pages =        "1:1--1:??",
  month =        jun,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1767751.1767752",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Jul 6 16:17:49 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Through implicit invocation, procedures are called
                 without explicitly referencing them. Implicit
                 announcement adds to this implicitness by not only
                 keeping implicit which procedures are called, but also
                 where or when --- under implicit invocation with
                 implicit announcement, the call site contains no sign
                 that it calls anything, nor of what it calls.
                 Recently, aspect-oriented
                 programming has popularized implicit invocation with
                 implicit announcement as a means of separating
                 concerns that lead to interwoven code if conventional
                 programming techniques are used. However, as has been
                 noted elsewhere, as currently implemented it
                 establishes strong implicit dependencies between
                 components, hampering independent software development
                 and evolution. To address this problem, we present a
                 type-based modularization of implicit invocation with
                 implicit announcement that is inspired by how
                 interfaces and exceptions are realized in Java. By
                 extending an existing compiler and by rewriting several
                 programs to make use of our proposed language
                 constructs, we found that the imposed declaration
                 clutter tends to be moderate; in particular, we found
                 that, for general applications of implicit invocation
                 with implicit announcement, fears that programs
                 utilizing our form of modularization become
                 unreasonably verbose are unjustified.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "aspect-oriented programming; event-driven programming;
                 implicit invocation; modularity; publish/subscribe;
                 typing",
}

@Article{Conboy:2010:MDC,
  author =       "Kieran Conboy and Brian Fitzgerald",
  title =        "Method and developer characteristics for effective
                 agile method tailoring: a study of {XP} expert
                 opinion",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "1",
  pages =        "2:1--2:??",
  month =        jun,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1767751.1767753",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Jul 6 16:17:49 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "It has long been acknowledged that software methods
                 should be tailored if they are to achieve optimum
                 effect. However, comparatively little research has been
                 carried out to date on this topic in general, and more
                 notably, on agile methods in particular. This dearth of
                 evidence in the case of agile methods is especially
                 significant in that it is reasonable to expect that
                 such methods would particularly lend themselves to
                 tailoring. In this research, we present a framework
                 based on interviews with 20 senior software development
                 researchers and a review of the extant literature. The
                 framework comprises two sets of factors ---
                 characteristics of the method, and developer practices
                 --- that can improve method tailoring effectiveness.
                 Drawing on the framework, we then interviewed 16 expert
                 XP practitioners to examine the current state and
                 effectiveness of XP tailoring efforts, and to shed
                 light on issues the framework identified as being
                 important. The article concludes with a set of
                 recommendations for research and practice that would
                 advance our understanding of the method tailoring
                 area.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "agile method; contingency; engineering; expert
                 opinion; Extreme programming; software development;
                 tailoring; XP",
}

@Article{Duala-Ekoko:2010:CRD,
  author =       "Ekwa Duala-Ekoko and Martin P. Robillard",
  title =        "Clone region descriptors: {Representing} and tracking
                 duplication in source code",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "1",
  pages =        "3:1--3:??",
  month =        jun,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1767751.1767754",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Jul 6 16:17:49 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Source code duplication, commonly known as {\em code
                 cloning}, is considered an obstacle to software
                 maintenance because changes to a cloned region often
                 require consistent changes to other regions of the
                 source code. Research has provided evidence that the
                 elimination of clones may not always be practical,
                 feasible, or cost-effective. We present a clone
                 management approach that describes clone regions in a
                 robust way that is independent from the exact text of
                 clone regions or their location in a file, and that
                 provides support for tracking clones in evolving
                 software. Our technique relies on the concept of
                 abstract {\em clone region descriptors\/} (CRDs), which
                 describe clone regions using a combination of their
                 syntactic, structural, and lexical information. We
                 present our definition of CRDs, and describe a clone
                 tracking system capable of producing CRDs from the
                 output of different clone detection tools, notifying
                 developers of modifications to clone regions, and
                 supporting updates to the documented clone
                 relationships. We evaluated the performance and
                 usefulness of our approach across three clone detection
                 tools and five subject systems, and the results
                 indicate that CRDs are a practical and robust
                 representation for tracking code clones in evolving
                 software.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "clone detection; clone management; code clones;
                 refactoring; Source code duplication",
}
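
%%% A rough Python sketch of the clone region descriptor idea from the
%%% entry above: describe a clone region by syntactic, structural, and
%%% lexical hints instead of file offsets. The field names and the 0.6
%%% threshold are illustrative assumptions, not the paper's schema:
%%%
%%%     from dataclasses import dataclass
%%%     from typing import List
%%%
%%%     @dataclass
%%%     class CRD:
%%%         syntactic_path: List[str]  # enclosing constructs, e.g.
%%%                                    # class / method / loop nesting
%%%         anchor_tokens: List[str]   # distinctive identifiers inside
%%%
%%%         def matches(self, path: List[str], tokens: List[str]) -> bool:
%%%             # the region is still 'this clone' if its structural
%%%             # path matches and most anchor tokens survive edits
%%%             overlap = len(set(self.anchor_tokens) & set(tokens))
%%%             return (path == self.syntactic_path
%%%                     and overlap >= 0.6 * len(self.anchor_tokens))
%%%
%%%     crd = CRD(["class Invoice", "method total", "for"],
%%%               ["rate", "vat", "rounding"])
%%%     print(crd.matches(["class Invoice", "method total", "for"],
%%%                       ["rate", "vat", "ceil"]))  # True: still tracked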

@Article{Ko:2010:EAW,
  author =       "Andrew J. Ko and Brad A. Myers",
  title =        "Extracting and answering why and why not questions
                 about {Java} program output",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "2",
  pages =        "4:1--4:??",
  month =        aug,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1824760.1824761",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Sep 8 18:47:44 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "When software developers want to understand the reason
                 for a program's behavior, they must translate their
                 questions about the behavior into a series of questions
                 about code, speculating about the causes in the
                 process. The Whyline is a new kind of debugging tool
                 that avoids such speculation by instead enabling
                 developers to select a question about program output
                 from a set of ``why did and why didn't'' questions
                 extracted from the program's code and execution. The
                 tool then finds one or more possible explanations for
                 the output in question. These explanations are derived
                  using static and dynamic slicing, precise call
                 graphs, reachability analyses, and new algorithms for
                 determining potential sources of values. Evaluations of
                 the tool on two debugging tasks showed that developers
                 with the Whyline were three times more successful and
                 twice as fast at debugging, compared to developers with
                 traditional breakpoint debuggers. The tool has the
                 potential to simplify debugging and program
                 understanding in many software development contexts.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "debugging; questions; Whyline",
}

@Article{Sullivan:2010:MAO,
  author =       "Kevin Sullivan and William G. Griswold and Hridesh
                 Rajan and Yuanyuan Song and Yuanfang Cai and Macneil
                 Shonle and Nishit Tewari",
  title =        "Modular aspect-oriented design with {XPIs}",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "2",
  pages =        "5:1--5:??",
  month =        aug,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1824760.1824762",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Sep 8 18:47:44 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The emergence of aspect-oriented programming (AOP)
                 languages has provided software designers with new
                 mechanisms and strategies for decomposing programs into
                 modules and composing modules into systems. What we do
                 not yet fully understand is how best to use such
                 mechanisms consistent with common modularization
                 objectives such as the comprehensibility of programming
                 code, its parallel development, dependability, and ease
                 of change. The main contribution of this work is a new
                 form of information-hiding interface for AOP that we
                 call the crosscut programming interface, or XPI. XPIs
                 abstract crosscutting behaviors and make these
                 abstractions explicit. XPIs can be used, albeit with
                 limited enforcement of interface rules, with existing
                 AOP languages, such as AspectJ. To evaluate our notion
                 of XPIs, we have applied our XPI-based design
                 methodology to a medium-sized network overlay
                 application called Hypercast. A qualitative and
                  quantitative analysis of existing AO design methods
                  and the XPI-based design method shows that our
                  approach produces improvements in program
                  comprehensibility, in opportunities for parallel
                  development, and in the ease with which code can be
                  developed and changed.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "Aspect-oriented programming; design rules; options",
}

@Article{Singh:2010:SWE,
  author =       "Param Vir Singh",
  title =        "The small-world effect: {The} influence of macro-level
                 properties of developer collaboration networks on
                 open-source project success",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "2",
  pages =        "6:1--6:??",
  month =        aug,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1824760.1824763",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Sep 8 18:47:44 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In this study we investigate the impact of
                 community-level networks --- relationships that exist
                 among developers in an OSS community --- on the
                 productivity of member developers. Specifically, we
                 argue that OSS community networks characterized by {\em
                 small-world\/} properties would positively influence
                 the productivity of the member developers by providing
                 them with speedy and reliable access to more quantity
                 and variety of information and knowledge resources.
                 Specific hypotheses are developed and tested using
                 longitudinal data on a large panel of 4,279 projects
                 from 15 different OSS communities hosted at
                 Sourceforge. Our results suggest that significant
                 variation exists in small-world properties of OSS
                 communities at Sourceforge. After accounting for
                 project, foundry, and time-specific observed and
                 unobserved effects, we found a statistically
                 significant relationship between small-world properties
                 of a community and the technical and commercial success
                 of the software produced by its members. In contrast to
                 the findings of prior research, we also found the lack
                 of a significant relationship between closeness and
                 betweenness centralities of the project teams and their
                 success. These results were robust to a number of
                 controls and model specifications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "collaborative software development; online community;
                 Open source software development; productivity; small
                 world networks; social networks; team formation",
}
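
%%% The small-world properties discussed above are straightforward to
%%% compute on a collaboration graph. A minimal sketch assuming the
%%% networkx library; the toy edge list is made up:
%%%
%%%     import networkx as nx
%%%
%%%     # nodes are developers; an edge joins developers who worked
%%%     # together on a common project
%%%     g = nx.Graph([("a", "b"), ("b", "c"), ("c", "a"),  # dense team
%%%                   ("c", "d"), ("d", "e"), ("e", "f")]) # bridge chain
%%%
%%%     clustering = nx.average_clustering(g)          # local cliquishness
%%%     path_len = nx.average_shortest_path_length(g)  # global reachability
%%%
%%%     # a community looks 'small-world' when clustering is high while
%%%     # the average path length stays near that of a random graph
%%%     print(f"C={clustering:.2f}, L={path_len:.2f}")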

@Article{Dyer:2010:SDA,
  author =       "Robert Dyer and Hridesh Rajan",
  title =        "Supporting dynamic aspect-oriented features",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "2",
  pages =        "7:1--7:??",
  month =        aug,
  year =         "2010",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/1824760.1824764",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Sep 8 18:47:44 MDT 2010",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Dynamic aspect-oriented (AO) features have important
                 software engineering benefits such as allowing
                 unanticipated software evolution and maintenance. It is
                 thus important to efficiently support these features in
                 language implementations. Current implementations incur
                 unnecessary design-time and runtime overhead due to the
                 lack of support in underlying intermediate language
                 (IL) models. To address this problem, we present a
                 flexible and dynamic IL model that we call {\em Nu}.
                 The {\em Nu\/} model provides a higher level of
                 abstraction compared to traditional object-oriented
                 ILs, making it easier to efficiently support dynamic AO
                 features. We demonstrate these benefits by providing an
                 industrial-strength VM implementation for {\em Nu}, by
                 showing translation strategies from dynamic
                 source-level constructs to {\em Nu\/} and by analyzing
                 the performance of the resulting IL code.\par

                 {\em Nu\/}'s VM extends the Sun Hotspot VM interpreter
                 and uses a novel caching mechanism to significantly
                 reduce the amortized costs of join point dispatch. Our
                 evaluation using standard benchmarks shows that the
                 overhead of supporting a dynamic deployment model can
                 be reduced to as little as $ \approx $1.5\%. {\em Nu\/}
                 provides an improved compilation target for dynamic
                 deployment features, which makes it easier to support
                 such features with corresponding software engineering
                 benefits in software evolution and maintenance and in
                 runtime verification.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "aspect-oriented intermediate-languages;
                 aspect-oriented virtual machines; invocation; Nu;
                 weaving",
}

@Article{Miles:2011:PMD,
  author =       "Simon Miles and Paul Groth and Steve Munroe and Luc
                 Moreau",
  title =        "{PrIMe}: a methodology for developing provenance-aware
                 applications",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "3",
  pages =        "8:1--8:??",
  month =        aug,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000791.2000792",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 23 18:32:12 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Provenance refers to the past processes that brought
                 about a given (version of an) object, item or entity.
                 By knowing the provenance of data, users can often
                 better understand, trust, reproduce, and validate it. A
                 provenance-aware application has the functionality to
                 answer questions regarding the provenance of the data
                 it produces, by using documentation of past processes.
                 PrIMe is a software engineering technique for adapting
                 application designs to enable them to interact with a
                 provenance middleware layer, thereby making them
                 provenance-aware.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
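
%%% To make the PrIMe idea concrete: a provenance-aware application
%%% records which process produced which output from which inputs. A
%%% minimal Python sketch with a hypothetical in-memory log standing in
%%% for the provenance middleware layer:
%%%
%%%     import functools, time
%%%
%%%     PROVENANCE = []   # stand-in for the middleware's process store
%%%
%%%     def provenance_aware(func):
%%%         """Document each invocation so questions about how a
%%%         result came about can be answered after the fact."""
%%%         @functools.wraps(func)
%%%         def wrapper(*args, **kwargs):
%%%             result = func(*args, **kwargs)
%%%             PROVENANCE.append({"process": func.__name__,
%%%                                "inputs": [repr(a) for a in args],
%%%                                "output": repr(result),
%%%                                "time": time.time()})
%%%             return result
%%%         return wrapper
%%%
%%%     @provenance_aware
%%%     def normalize(xs):
%%%         total = sum(xs)
%%%         return [x / total for x in xs]
%%%
%%%     normalize([1, 3])
%%%     print(PROVENANCE[-1]["process"])   # 'normalize'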

@Article{Chen:2011:TDB,
  author =       "Jinjun Chen and Yun Yang",
  title =        "Temporal dependency-based checkpoint selection for
                 dynamic verification of temporal constraints in
                 scientific workflow systems",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "3",
  pages =        "9:1--9:??",
  month =        aug,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000791.2000793",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 23 18:32:12 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In a scientific workflow system, a checkpoint
                 selection strategy is used to select checkpoints along
                 scientific workflow execution for verifying temporal
                 constraints so that we can identify any temporal
                  violations and handle them in time, thereby ensuring
                  the overall temporal correctness of the execution,
                  which is often essential for the usefulness of
                  execution results. The problem with existing representative
                 strategies is that they do not differentiate temporal
                 constraints as, once a checkpoint is selected, they
                 verify all temporal constraints. However, such a
                 checkpoint does not need to be taken for those
                 constraints whose consistency can be deduced from
                 others. The corresponding verification of such
                 constraints is consequently unnecessary and can
                 severely impact overall temporal verification
                  efficiency, even though this efficiency determines
                  whether temporal violations can be identified quickly
                  enough to be handled in time.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Anvik:2011:REB,
  author =       "John Anvik and Gail C. Murphy",
  title =        "Reducing the effort of bug report triage: Recommenders
                 for development-oriented decisions",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "3",
  pages =        "10:1--10:??",
  month =        aug,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000791.2000794",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 23 18:32:12 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "A key collaborative hub for many software development
                 projects is the bug report repository. Although its use
                 can improve the software development process in a
                 number of ways, reports added to the repository need to
                 be triaged. A triager determines if a report is
                 meaningful. Meaningful reports are then organized for
                 integration into the project's development process. To
                 assist triagers with their work, this article presents
                 a machine learning approach to create recommenders that
                 assist with a variety of decisions aimed at
                 streamlining the development process. The recommenders
                 created with this approach are accurate; for instance,
                 recommenders for which developer to assign a report
                 that we have created using this approach have a
                 precision between 70\% and 98\% over five open source
                 projects.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
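
%%% The recommenders above are standard supervised text classifiers
%%% trained on previously triaged reports. A minimal sketch assuming
%%% scikit-learn; the toy reports, team labels, and choice of learner
%%% are illustrative, not the paper's exact configuration:
%%%
%%%     from sklearn.feature_extraction.text import TfidfVectorizer
%%%     from sklearn.pipeline import make_pipeline
%%%     from sklearn.svm import LinearSVC
%%%
%%%     reports = ["crash when saving large file",
%%%                "save dialog freezes on network drive",
%%%                "font rendering is blurry on hidpi",
%%%                "glyphs overlap at 150% zoom"]
%%%     assignees = ["io-team", "io-team", "render-team", "render-team"]
%%%
%%%     model = make_pipeline(TfidfVectorizer(), LinearSVC())
%%%     model.fit(reports, assignees)       # learn from triage history
%%%     print(model.predict(["saving corrupts the file"]))
%%%     # likely ['io-team'] given the shared 'saving' vocabulary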

@Article{Naish:2011:MSB,
  author =       "Lee Naish and Hua Jie Lee and Kotagiri Ramamohanarao",
  title =        "A model for spectra-based software diagnosis",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "3",
  pages =        "11:1--11:??",
  month =        aug,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000791.2000795",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 23 18:32:12 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "This article presents an improved approach to assist
                 diagnosis of failures in software (fault localisation)
                  by ranking program statements or blocks according to
                  how likely they are to be buggy. We present a
                 very simple single-bug program to model the problem. By
                 examining different possible execution paths through
                 this model program over a number of test cases, the
                 effectiveness of different proposed spectral ranking
                 methods can be evaluated in idealised conditions. The
                  results are remarkably consistent with those arrived at
                 empirically using the Siemens test suite and Space
                 benchmarks. The model also helps identify groups of
                 metrics that are equivalent for ranking.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
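
%%% The spectral ranking methods evaluated above score a statement from
%%% its program spectrum: how many failing and passing runs execute it.
%%% A small sketch of two standard metrics from this literature (Ochiai
%%% and Tarantula); the counts below are made up:
%%%
%%%     import math
%%%
%%%     def ochiai(ef, ep, nf, np_):
%%%         # ef/ep: failing/passing runs executing the statement;
%%%         # nf/np_: failing/passing runs that do not
%%%         denom = math.sqrt((ef + nf) * (ef + ep))
%%%         return ef / denom if denom else 0.0
%%%
%%%     def tarantula(ef, ep, nf, np_):
%%%         fail = ef / (ef + nf) if ef + nf else 0.0
%%%         pazz = ep / (ep + np_) if ep + np_ else 0.0
%%%         return fail / (fail + pazz) if fail + pazz else 0.0
%%%
%%%     spectrum = {"s1": (4, 1, 0, 5), "s2": (2, 4, 2, 2),
%%%                 "s3": (0, 5, 4, 1)}
%%%     ranking = sorted(spectrum, reverse=True,
%%%                      key=lambda s: ochiai(*spectrum[s]))
%%%     print(ranking)   # most suspicious first: ['s1', 's2', 's3']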

@Article{Binkley:2011:FTT,
  author =       "David W. Binkley and Mark Harman and Kiran Lakhotia",
  title =        "{FlagRemover}: a testability transformation for
                 transforming loop-assigned flags",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "3",
  pages =        "12:1--12:??",
  month =        aug,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000791.2000796",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 23 18:32:12 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Search-Based Testing is a widely studied technique for
                 automatically generating test inputs, with the aim of
                 reducing the cost of software engineering activities
                 that rely upon testing. However, search-based
                 approaches degenerate to random testing in the presence
                 of flag variables, because flags create spikes and
                 plateaux in the fitness landscape. Both these features
                 are known to denote hard optimization problems for all
                 search-based optimization techniques. Several authors
                 have studied flag removal transformations and fitness
                 function refinements to address the issue of flags, but
                 the problem of loop-assigned flags remains unsolved.
                 This article introduces a testability transformation
                 along with a tool that transforms programs with
                 loop-assigned flags into flag-free equivalents, so that
                 existing search-based test data generation approaches
                 can successfully be applied.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
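
%%% The transformation above targets flags assigned inside loops, which
%%% give search-based testing no gradient. A Python sketch of the
%%% general flag-to-counter rewrite; this illustrates the idea of a
%%% testability transformation, not FlagRemover's actual algorithm:
%%%
%%%     # search-hostile: the fitness landscape for 'make this return
%%%     # True' is a flat plateau, since flag is only ever True/False
%%%     def has_negative(xs):
%%%         flag = False
%%%         for x in xs:
%%%             if x < 0:
%%%                 flag = True
%%%         return flag
%%%
%%%     # transformed equivalent: counter > 0 <=> flag, and the counter
%%%     # gives the search a gradient toward flag-setting inputs
%%%     def has_negative_t(xs):
%%%         counter = 0
%%%         for x in xs:
%%%             if x < 0:
%%%                 counter += 1
%%%         return counter > 0
%%%
%%%     assert has_negative([3, -1]) == has_negative_t([3, -1])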

@Article{Chen:2011:RFC,
  author =       "Zhenyu Chen and Tsong Yueh Chen and Baowen Xu",
  title =        "A revisit of fault class hierarchies in general
                 {Boolean} specifications",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "3",
  pages =        "13:1--13:??",
  month =        aug,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000791.2000797",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Aug 23 18:32:12 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Recently, Kapoor and Bowen [2007] have extended the
                  work of Kuhn [1999], Tsuchiya and Kikuno [2002], and
                  Lau and Yu [2005]. However, their proofs overlook the
                  possibility that a mutant of the Boolean specification
                  under test may be equivalent to the original
                  specification. Hence, each of their
                 fault relationships is either incorrect or has an
                 incorrect proof. In this article, we give
                 counterexamples to the incorrect fault relationships
                 and provide new proofs for the valid fault
                 relationships. Furthermore, a co-stronger fault
                 relation is introduced to establish a new fault class
                 hierarchy for general Boolean specifications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
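
%%% The overlooked case above is the 'equivalent mutant': a syntactic
%%% change that does not alter the specified Boolean function. For
%%% small specifications this is decidable by brute force, as in this
%%% sketch (the example expressions are made up):
%%%
%%%     from itertools import product
%%%
%%%     def equivalent(f, g, nvars):
%%%         """Compare two Boolean functions on every assignment."""
%%%         return all(f(*v) == g(*v)
%%%                    for v in product([False, True], repeat=nvars))
%%%
%%%     original = lambda a, b: a and (b or a)
%%%     mutant   = lambda a, b: a        # equivalent: (b or a) is
%%%                                      # redundant once a holds
%%%     print(equivalent(original, mutant, 2))   # True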

@Article{Bauer:2011:RVL,
  author =       "Andreas Bauer and Martin Leucker and Christian
                 Schallhart",
  title =        "Runtime Verification for {LTL} and {TLTL}",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "4",
  pages =        "14:1--14:??",
  month =        sep,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000799.2000800",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Sep 26 17:32:55 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "This article studies runtime verification of
                  properties expressed either in linear-time temporal
                  logic (LTL) or timed linear-time temporal logic
                  (TLTL). It characterizes runtime verification by
                  identifying its distinguishing features relative to
                  model checking and testing, respectively. It
                  introduces a three-valued semantics
                 (with truth values true, false, inconclusive) as an
                 adequate interpretation as to whether a partial
                 observation of a running system meets an LTL or TLTL
                 property. For LTL, a conceptually simple monitor
                 generation procedure is given, which is optimal in two
                 respects: First, the size of the generated
                 deterministic monitor is minimal, and, second, the
                 monitor identifies a continuously monitored trace as
                 either satisfying or falsifying a property as early as
                 possible.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
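
%%% A hand-written three-valued monitor for one safety property, to
%%% make the semantics above concrete; the paper instead generates
%%% minimal deterministic monitors from arbitrary LTL formulas. The
%%% property and event names are invented:
%%%
%%%     TRUE, FALSE, INCONCLUSIVE = "true", "false", "?"
%%%
%%%     class Monitor:
%%%         """G(shutdown -> G !send): after a shutdown, no send may
%%%         occur. A finite prefix can falsify this property but can
%%%         never establish it, hence the third truth value."""
%%%         def __init__(self):
%%%             self.down = False
%%%             self.verdict = INCONCLUSIVE
%%%
%%%         def step(self, event):
%%%             if self.verdict == INCONCLUSIVE:
%%%                 if event == "shutdown":
%%%                     self.down = True
%%%                 elif event == "send" and self.down:
%%%                     self.verdict = FALSE   # earliest detection
%%%             return self.verdict
%%%
%%%     m = Monitor()
%%%     for e in ["send", "shutdown", "send"]:
%%%         print(e, "->", m.step(e))    # ?, ?, false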

@Article{Nie:2011:MFC,
  author =       "Changhai Nie and Hareton Leung",
  title =        "The Minimal Failure-Causing Schema of Combinatorial
                 Testing",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "4",
  pages =        "15:1--15:??",
  month =        sep,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000799.2000801",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Sep 26 17:32:55 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Combinatorial Testing (CT) involves the design of a
                 small test suite to cover the parameter value
                 combinations so as to detect failures triggered by the
                 interactions among these parameters. To make full use
                 of CT and to extend its advantages, this article first
                 gives a model of CT and then presents a theory of the
                 Minimal Failure-causing Schema (MFS), including the
                 concept of the MFS, proof of its existence, some of its
                 properties, and a method of finding the MFS. Then we
                 propose a methodology for CT based on this MFS theory
                 and the existing research. Our MFS-based methodology
                 emphasizes that CT should work on accurate testing
                  requirements, and has the following advantages: (1) it
                  detects failures to the greatest degree at the least
                  cost.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
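
%%% A brute-force reading of the MFS concept above: the smallest
%%% sub-assignment of a failing test that still triggers the failure.
%%% The sketch assumes an executable failure oracle and enumerates
%%% schemas by size, which is exponential and purely illustrative:
%%%
%%%     from itertools import combinations
%%%
%%%     def minimal_failure_schema(failing_test, fails):
%%%         items = sorted(failing_test.items())
%%%         for size in range(1, len(items) + 1):
%%%             for combo in combinations(items, size):
%%%                 schema = dict(combo)
%%%                 if fails(schema):
%%%                     return schema   # minimal: sizes tried in order
%%%         return failing_test
%%%
%%%     # hypothetical SUT: fails iff os=win AND ipv6=on
%%%     fails = lambda s: (s.get("os") == "win"
%%%                        and s.get("ipv6") == "on")
%%%     test = {"os": "win", "browser": "ff", "ipv6": "on"}
%%%     print(minimal_failure_schema(test, fails))
%%%     # {'ipv6': 'on', 'os': 'win'}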

@Article{Gandhi:2011:DMC,
  author =       "R. A. Gandhi and S. W. Lee",
  title =        "Discovering Multidimensional Correlations among
                 Regulatory Requirements to Understand Risk",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "4",
  pages =        "16:1--16:??",
  month =        sep,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000799.2000802",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Sep 26 17:32:55 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Security breaches most often occur due to a cascading
                 effect of failure among security constraints that
                 collectively contribute to overall secure system
                 behavior in a socio-technical environment. Therefore,
                 during security certification activities, analysts must
                 systematically take into account the nexus of causal
                 chains that exist among security constraints imposed by
                 regulatory requirements. Numerous regulatory
                 requirements specified in natural language documents or
                 listed in spreadsheets/databases do not facilitate such
                 analysis. The work presented in this article outlines a
                 stepwise methodology to discover and understand the
                 multidimensional correlations among regulatory
                 requirements for the purpose of understanding the
                 potential for risk due to noncompliance during system
                 operation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Dehlinger:2011:GPP,
  author =       "Josh Dehlinger and Robyn R. Lutz",
  title =        "{Gaia-PL}: a Product Line Engineering Approach for
                 Efficiently Designing Multiagent Systems",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "4",
  pages =        "17:1--17:??",
  month =        sep,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000799.2000803",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Sep 26 17:32:55 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Agent-oriented software engineering (AOSE) has
                  provided powerful and natural high-level abstractions
                 in which software developers can understand, model and
                 develop complex, distributed systems. Yet, the
                 realization of AOSE partially depends on whether
                 agent-based software systems can achieve reductions in
                 development time and cost similar to other
                 reuse-conscious development methods. Specifically, AOSE
                 does not adequately address requirements specifications
                 as reusable assets. Software product line engineering
                 is a reuse technology that supports the systematic
                 development of a set of similar software systems
                 through understanding, controlling, and managing their
                 common, core characteristics and their differing
                 variation points. In this article, we present an
                 extension to the Gaia AOSE methodology, named Gaia-PL
                 (Gaia-Product Line), for agent-based distributed
                 software systems that enables requirements
                 specifications to be easily reused.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Maoz:2011:CMS,
  author =       "Shahar Maoz and David Harel and Asaf Kleinbort",
  title =        "A Compiler for Multimodal Scenarios: Transforming
                 {LSCs} into {AspectJ}",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "4",
  pages =        "18:1--18:??",
  month =        sep,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000799.2000804",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Sep 26 17:32:55 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We exploit the main similarity between the
                 aspect-oriented programming paradigm and the
                 inter-object, scenario-based approach to specification,
                 in order to construct a new way of executing systems
                 based on the latter. Specifically, we transform
                 multimodal scenario-based specifications, given in the
                 visual language of live sequence charts (LSC), into
                 what we call scenario aspects, implemented in AspectJ.
                 Unlike synthesis approaches, which attempt to take the
                 inter-object scenarios and construct intra-object
                 state-based per-object specifications or a single
                 controller automaton, we follow the ideas behind the
                 LSC play-out algorithm to coordinate the simultaneous
                 monitoring and direct execution of the specified
                 scenarios. Thus, the structure of the specification is
                 reflected in the structure of the generated code; the
                 high-level inter-object requirements and their
                 structure are not lost in the translation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Dagenais:2011:RAC,
  author =       "Barth{\'e}l{\'e}my Dagenais and Martin P. Robillard",
  title =        "Recommending Adaptive Changes for Framework
                 Evolution",
  journal =      j-TOSEM,
  volume =       "20",
  number =       "4",
  pages =        "19:1--19:??",
  month =        sep,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2000799.2000805",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Sep 26 17:32:55 MDT 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In the course of a framework's evolution, changes
                 ranging from a simple refactoring to a complete
                 rearchitecture can break client programs. Finding
                 suitable replacements for framework elements that were
                 accessed by a client program and deleted as part of the
                 framework's evolution can be a challenging task. We
                 present a recommendation system, SemDiff, that suggests
                 adaptations to client programs by analyzing how a
                 framework was adapted to its own changes. In a study of
                 the evolution of one open source framework and three
                 client programs, our approach recommended relevant
                 adaptive changes with a high level of precision.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Ebnenasir:2011:FSD,
  author =       "Ali Ebnenasir and Sandeep S. Kulkarni",
  title =        "Feasibility of Stepwise Design of Multitolerant
                 Programs",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "1",
  pages =        "1:1--1:??",
  month =        dec,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2063239.2063240",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Dec 20 18:31:08 MST 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The complexity of designing programs that
                 simultaneously tolerate multiple classes of faults,
                 called multitolerant programs, is in part due to the
                 conflicting nature of the fault tolerance requirements
                 that must be met by a multitolerant program when
                 different types of faults occur. To facilitate the
                 design of multitolerant programs, we present sound and
                 (deterministically) complete algorithms for stepwise
                 design of two families of multitolerant programs in a
                 high atomicity program model, where a process can read
                 and write all program variables in an atomic step. We
                 illustrate that if one needs to design failsafe
                 (respectively, nonmasking) fault tolerance for one
                 class of faults and masking fault tolerance for another
                 class of faults, then a multitolerant program can be
                 designed in separate polynomial-time (in the state
                 space of the fault-intolerant program) steps regardless
                 of the order of addition.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Arnold:2011:QER,
  author =       "Matthew Arnold and Martin Vechev and Eran Yahav",
  title =        "{QVM}: An Efficient Runtime for Detecting Defects in
                 Deployed Systems",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "1",
  pages =        "2:1--2:??",
  month =        dec,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2063239.2063241",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Dec 20 18:31:08 MST 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Coping with software defects that occur in the
                 post-deployment stage is a challenging problem: bugs
                 may occur only when the system uses a specific
                 configuration and only under certain usage scenarios.
                 Nevertheless, halting production systems until the bug
                 is tracked and fixed is often impossible. Thus,
                 developers have to try to reproduce the bug in
                 laboratory conditions. Often, the reproduction of the
                 bug takes most of the debugging effort. In this paper
                 we suggest an approach to address this problem by using
                 a specialized runtime environment called Quality
                 Virtual Machine (QVM). QVM efficiently detects defects
                 by continuously monitoring the execution of the
                 application in a production setting.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Tilevich:2011:EEP,
  author =       "Eli Tilevich and Sriram Gopal",
  title =        "Expressive and Extensible Parameter Passing for
                 Distributed Object Systems",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "1",
  pages =        "3:1--3:??",
  month =        dec,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2063239.2063242",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Dec 20 18:31:08 MST 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In modern distributed object systems, reference
                 parameters to a remote method are passed according to
                 their runtime type. This design choice limits the
                 expressiveness, readability, and maintainability of
                 distributed applications. Further, to extend the
                 built-in set of parameter passing semantics of a
                 distributed object system, the programmer has to
                 understand and modify the underlying middleware
                 implementation. To address these design shortcomings,
                 this article presents (i) a declarative and extensible
                 approach to remote parameter passing that decouples
                 parameter passing semantics from parameter types, and
                 (ii) a plugin-based framework, DeXteR, which enables
                 the programmer to extend the built-in set of remote
                 parameter passing semantics, without having to
                 understand or modify the underlying middleware
                 implementation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Sim:2011:HWD,
  author =       "Susan Elliott Sim and Medha Umarji and Sukanya
                 Ratanotayanon and Cristina V. Lopes",
  title =        "How Well Do Search Engines Support Code Retrieval on
                 the {Web}?",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "1",
  pages =        "4:1--4:??",
  month =        dec,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2063239.2063243",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Dec 20 18:31:08 MST 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Software developers search the Web for various kinds
                 of source code for diverse reasons. In a previous
                 study, we found that searches varied along two
                 dimensions: the size of the search target (e.g., block,
                 subsystem, or system) and the motivation for the search
                 (e.g., reference example or as-is reuse). Would each of
                 these kinds of searches require different search
                 technologies? To answer this question, we conducted an
                 experiment with 36 participants to evaluate three
                 diverse approaches (general purpose information
                 retrieval, source code search, and component reuse), as
                 represented by five Web sites (Google, Koders, Krugle,
                 Google Code Search, and SourceForge). The independent
                 variables were search engine, size of search target,
                 and motivation for search.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Diep:2011:LBS,
  author =       "Madeline M. Diep and Matthew B. Dwyer and Sebastian
                 Elbaum",
  title =        "Lattice-Based Sampling for Path Property Monitoring",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "1",
  pages =        "5:1--5:??",
  month =        dec,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2063239.2063244",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Dec 20 18:31:08 MST 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Runtime monitoring can provide important insights
                 about a program's behavior and, for simple properties,
                 it can be done efficiently. Monitoring properties
                 describing sequences of program states and events,
                 however, can result in significant runtime overhead.
                 This is particularly critical when monitoring programs
                 deployed at user sites that have low tolerance for
                 overhead. In this paper we present a novel approach to
                 reducing the cost of runtime monitoring of path
                  properties. A set of original properties is composed
                 to form a single integrated property that is then
                 systematically decomposed into a set of properties that
                 encode necessary conditions for property violations.
                 The resulting set of properties forms a lattice whose
                 structure is exploited to select a sample of properties
                 that can lower monitoring cost, while preserving
                 violation detection power relative to the original
                 properties.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Erwig:2011:CCR,
  author =       "Martin Erwig and Eric Walkingshaw",
  title =        "The Choice Calculus: a Representation for Software
                 Variation",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "1",
  pages =        "6:1--6:??",
  month =        dec,
  year =         "2011",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2063239.2063245",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Tue Dec 20 18:31:08 MST 2011",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Many areas of computer science are concerned with some
                 form of variation in software---from managing changes
                 to software over time to supporting families of related
                 artifacts. We present the choice calculus, a
                 fundamental representation for software variation that
                 can serve as a common language of discourse for
                 variation research, filling a role similar to the
                 lambda calculus in programming language research. We
                 also develop an associated theory of software
                 variation, including sound transformations of variation
                 artifacts, the definition of strategic normal forms,
                 and a design theory for variation structures, which
                 will support the development of better algorithms and
                 tools.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
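
%%% The choice calculus above is small enough to sketch directly. A toy
%%% Python rendering with plain-text leaves and a selection operation
%%% that eliminates one dimension (the full calculus also covers
%%% sharing and a structured object language):
%%%
%%%     from dataclasses import dataclass
%%%     from typing import Union
%%%
%%%     @dataclass
%%%     class Chc:
%%%         """A choice D<l, r>: dimension D picks a variant."""
%%%         dim: str
%%%         left: "Expr"
%%%         right: "Expr"
%%%
%%%     Expr = Union[str, Chc]    # leaves are plain program text
%%%
%%%     def select(e: Expr, dim: str, pick_left: bool) -> Expr:
%%%         if isinstance(e, Chc):
%%%             if e.dim == dim:
%%%                 chosen = e.left if pick_left else e.right
%%%                 return select(chosen, dim, pick_left)
%%%             return Chc(e.dim, select(e.left, dim, pick_left),
%%%                        select(e.right, dim, pick_left))
%%%         return e
%%%
%%%     prog = Chc("A", "x + x", "2 * x")   # two ways to double x
%%%     print(select(prog, "A", pick_left=False))   # '2 * x'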

@Article{Notkin:2012:E,
  author =       "David Notkin",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "2",
  pages =        "7:1--7:??",
  month =        mar,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2089116.2089117",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 19 17:14:21 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{AlDallal:2012:PMM,
  author =       "Jehad {Al Dallal} and Lionel C. Briand",
  title =        "A Precise Method-Method Interaction-Based Cohesion
                 Metric for Object-Oriented Classes",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "2",
  pages =        "8:1--8:??",
  month =        mar,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2089116.2089118",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 19 17:14:21 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The building of highly cohesive classes is an
                 important objective in object-oriented design. Class
                 cohesion refers to the relatedness of the class
                 members, and it indicates one important aspect of the
                 class design quality. A meaningful class cohesion
                 metric helps object-oriented software developers detect
                 class design weaknesses and refactor classes
                 accordingly. Several class cohesion metrics have been
                 proposed in the literature. Most of these metrics are
                 applicable based on low-level design information such
                 as attribute references in methods. Some of these
                 metrics capture class cohesion by counting the number
                 of method pairs that share common attributes. A few
                 metrics measure cohesion more precisely by considering
                 the degree of interaction, through attribute
                 references, between each pair of methods. However, the
                 formulas applied by these metrics to measure the degree
                 of interaction cause the metrics to violate important
                 mathematical properties, thus undermining their
                 construct validity and leading to misleading cohesion
                 measurement. In this paper, we propose a formula that
                 precisely measures the degree of interaction between
                 each pair of methods, and we use it as a basis to
                 introduce a low-level design class cohesion metric
                 (LSCC). We verify that the proposed formula does not
                 cause the metric to violate important mathematical
                 properties. In addition, we provide a mechanism to use
                 this metric as a useful indicator for refactoring
                 weakly cohesive classes, thus showing its usefulness in
                 improving class cohesion. Finally, we empirically
                 validate LSCC. Using four open source software systems
                 and eleven cohesion metrics, we investigate the
                 relationship between LSCC, other cohesion metrics, and
                 fault occurrences in classes. Our results show that
                 LSCC is one of three metrics that explains more
                 accurately the presence of faults in classes. LSCC is
                 the only one among the three metrics to comply with
                 important mathematical properties, and statistical
                 analysis shows it captures a measurement dimension of
                 its own. This suggests that LSCC is a better
                 alternative, when taking into account both theoretical
                 and empirical results, as a measure to guide the
                 refactoring of classes. From a more general standpoint,
                 the results suggest that class quality, as measured in
                 terms of fault occurrences, can be more accurately
                 explained by cohesion metrics that account for the
                 degree of interaction between each pair of methods.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
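
%%% The degree-of-interaction idea above can be sketched as a set
%%% overlap between the attributes each pair of methods touches. This
%%% is a simplified stand-in, not the exact LSCC formula from the
%%% paper; the class and attribute names are invented:
%%%
%%%     from itertools import combinations
%%%
%%%     def pairwise_cohesion(methods):
%%%         """methods: name -> set of attributes it reads/writes."""
%%%         pairs = list(combinations(methods, 2))
%%%         if not pairs:
%%%             return 1.0
%%%         def interaction(a, b):
%%%             union = methods[a] | methods[b]
%%%             shared = methods[a] & methods[b]
%%%             return len(shared) / len(union) if union else 0.0
%%%         return sum(interaction(a, b) for a, b in pairs) / len(pairs)
%%%
%%%     cls = {"open": {"fd", "mode"}, "read": {"fd", "buf"},
%%%            "close": {"fd"}}
%%%     print(round(pairwise_cohesion(cls), 2))   # 0.44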

@Article{Fischbein:2012:WAM,
  author =       "Dario Fischbein and Nicolas D'Ippolito and Greg Brunet
                 and Marsha Chechik and Sebastian Uchitel",
  title =        "Weak Alphabet Merging of Partial Behavior Models",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "2",
  pages =        "9:1--9:??",
  month =        mar,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2089116.2089119",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 19 17:14:21 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Constructing comprehensive operational models of
                 intended system behavior is a complex and costly task,
                 which can be mitigated by the construction of partial
                 behavior models, providing early feedback and
                 subsequently elaborating them iteratively. However, how
                 should partial behavior models with different
                 viewpoints covering different aspects of behavior be
                 composed? How should partial models of component
                 instances of the same type be put together? In this
                 article, we propose model merging of modal transition
                 systems (MTSs) as a solution to these questions. MTS
                 models are a natural extension of labelled transition
                 systems that support explicit modeling of what is
                 currently unknown about system behavior. We formally
                 define model merging based on weak alphabet refinement,
                 which guarantees property preservation, and show that
                 merging consistent models is a process that should
                 result in a minimal common weak alphabet refinement
                 (MCR). In this article, we provide theoretical results
                 and algorithms that support such a process. Finally,
                 because in practice MTS merging is likely to be
                 combined with other operations over MTSs such as
                 parallel composition, we also study the algebraic
                 properties of merging and apply these, together with
                 the algorithms that support MTS merging, in a case
                 study.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Mattsson:2012:AMA,
  author =       "Anders Mattsson and Brian Fitzgerald and Bj{\"o}rn
                 Lundell and Brian Lings",
  title =        "An Approach for Modeling Architectural Design Rules in
                 {UML} and its Application to Embedded Software",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "2",
  pages =        "10:1--10:??",
  month =        mar,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2089116.2089120",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 19 17:14:21 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Current techniques for modeling software architecture
                 do not provide sufficient support for modeling
                 architectural design rules. This is a problem in the
                 context of model-driven development in which it is
                 assumed that major design artifacts are represented as
                 formal or semi-formal models. This article addresses
                 this problem by presenting an approach to modeling
                 architectural design rules in UML at the abstraction
                 level of the meaning of the rules. The high abstraction
                  level and the use of UML make the rules both amenable
                 to automation and easy to understand for both
                 architects and developers, which is crucial to
                 deployment in an organization. To provide a
                 proof-of-concept, a tool was developed that validates a
                 system model against the architectural rules in a
                 separate UML model. To demonstrate the feasibility of
                 the approach, the architectural design rules of an
                 existing live industrial-strength system were modeled
                 according to the approach.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Kulkarni:2012:GPF,
  author =       "Devdatta Kulkarni and Tanvir Ahmed and Anand
                 Tripathi",
  title =        "A Generative Programming Framework for Context-Aware
                 {CSCW} Applications",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "2",
  pages =        "11:1--11:??",
  month =        mar,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2089116.2089121",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 19 17:14:21 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We present a programming framework based on the
                 paradigm of generative application development for
                 building context-aware collaborative applications. In
                 this approach, context-aware applications are
                 implemented using a domain-specific design model, and
                 their execution environment is generated and maintained
                 by the middleware. The key features of this design
                 model include support for context-based service
                 discovery and binding, context-based access control,
                 context-based multiuser coordination, and
                 context-triggered automated task executions. The
                 middleware uses the technique of policy-based
                 specialization for generating application-specific
                 middleware components from the generic middleware
                 components. Through a case-study example, we
                 demonstrate this approach and present the evaluations
                 of the design model and the middleware.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Roychoudhury:2012:SMS,
  author =       "Abhik Roychoudhury and Ankit Goel and Bikram
                 Sengupta",
  title =        "Symbolic Message Sequence Charts",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "2",
  pages =        "12:1--12:??",
  month =        mar,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2089116.2089122",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 19 17:14:21 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Message sequence charts (MSCs) are a widely used
                 visual formalism for scenario-based specifications of
                 distributed reactive systems. In its conventional
                 usage, an MSC captures an interaction snippet between
                 concrete objects in the system. This leads to
                 voluminous specifications when the system contains
                 several objects that are behaviorally similar. MSCs
                 also play an important role in the model-based testing
                 of reactive systems, where they may be used for
                 specifying (partial) system behaviors, describing test
                 generation criteria, or representing test cases.
                  However, since the number of processes in an MSC
                  specification is fixed, model-based testing of systems
                 consisting of process classes may involve a significant
                 amount of rework: for example, reconstructing system
                 models, or regenerating test cases for systems
                 differing only in the number of processes of various
                 types. In this article we propose a scenario-based
                 notation, called symbolic message sequence charts
                 (SMSCs), for modeling, simulation, and testing of
                 process classes. SMSCs are a lightweight syntactic and
                  semantic extension of MSCs where, unlike MSCs, an SMSC
                 lifeline can denote some/all objects from a collection.
                 Our extensions give us substantially more modeling
                 power. Moreover, we present an abstract execution
                 semantics for (structured collections of) SMSCs. This
                 allows us to validate MSC-based system models capturing
                  interactions between large, or even unbounded, numbers
                  of objects. Finally, we describe an SMSC-based testing
                 methodology for process classes, which allows
                 generation of test cases for new object configurations
                 with minimal rework. Since our SMSC extensions are only
                 concerned with MSC lifelines, we believe that they can
                 be integrated into existing standards such as UML 2.0.
                 We illustrate our SMSC-based framework for modeling,
                 simulation, and testing of process classes using a
                 weather-update controller case-study from NASA.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Queralt:2012:VVU,
  author =       "Anna Queralt and Ernest Teniente",
  title =        "Verification and Validation of {UML} Conceptual
                 Schemas with {OCL} Constraints",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "2",
  pages =        "13:1--13:??",
  month =        mar,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2089116.2089123",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Mon Mar 19 17:14:21 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "To ensure the quality of an information system, it is
                 essential that the conceptual schema that represents
                 the knowledge about its domain is semantically correct.
                 The semantic correctness of a conceptual schema can be
                 seen from two different perspectives. On the one hand,
                 from the point of view of its definition, a conceptual
                 schema must be right. This is ensured by means of
                 verification techniques that check whether the schema
                 satisfies several correctness properties. On the other
                 hand, from the point of view of the requirements that
                 the information system should satisfy, a schema must
                 also be the right one. This is ensured by means of
                 validation techniques, which help the designer
                 understand the exact meaning of a schema and to see
                 whether it corresponds to the requirements. In this
                 article we propose an approach to verify and validate
                 UML conceptual schemas, with arbitrary constraints
                 formalized in OCL. We have also implemented our
                 approach to show its feasibility.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Kastner:2012:TCA,
  author =       "Christian K{\"a}stner and Sven Apel and Thomas
                 Th{\"u}m and Gunter Saake",
  title =        "Type checking annotation-based product lines",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "3",
  pages =        "14:1--14:??",
  month =        jun,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2211616.2211617",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Jun 29 18:08:30 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Software product line engineering is an efficient
                 means of generating a family of program variants for a
                 domain from a single code base. However, because of the
                 potentially high number of possible program variants,
                 it is difficult to test them all and ensure properties
                 like type safety for the entire product line. We
                 present a product-line-aware type system that can type
                 check an entire software product line without
                 generating each variant in isolation. Specifically, we
                 extend the Featherweight Java calculus with feature
                 annotations for product-line development and prove
                 formally that all program variants generated from a
                 well typed product line are well typed. Furthermore, we
                 present a solution to the problem of typing mutually
                 exclusive features. We discuss how results from our
                 formalization helped implement our own product-line
                  tool CIDE for full Java and report on our experience
                 with detecting type errors in four existing software
                 product line implementations.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
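
%%% The type system above checks an entire annotated product line at
%%% once rather than generating and checking each variant.  One core
%%% obligation in annotation-based approaches is that the presence
%%% condition of a reference must imply the presence condition of the
%%% declaration it uses.  The brute-force check below illustrates that
%%% obligation at toy scale (real tools delegate such implications to
%%% SAT solvers); the feature names and conditions are hypothetical.
%%%
%%%     from itertools import product
%%%
%%%     def implies(antecedent, consequent, features):
%%%         """Check antecedent(sel) -> consequent(sel) for every
%%%         feature selection (exponential; shown for illustration)."""
%%%         for values in product([False, True], repeat=len(features)):
%%%             sel = dict(zip(features, values))
%%%             if antecedent(sel) and not consequent(sel):
%%%                 return False
%%%         return True
%%%
%%%     FEATURES = ["LOGGING", "NET"]
%%%     # Hypothetical: a call annotated LOGGING targets a method that
%%%     # is only compiled when both LOGGING and NET are selected.
%%%     call_pc = lambda sel: sel["LOGGING"]
%%%     decl_pc = lambda sel: sel["LOGGING"] and sel["NET"]
%%%     print(implies(call_pc, decl_pc, FEATURES))  # False: some variant breaks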

@Article{Shonle:2012:FCR,
  author =       "Macneil Shonle and William G. Griswold and Sorin
                 Lerner",
  title =        "A framework for the checking and refactoring of
                 crosscutting concepts",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "3",
  pages =        "15:1--15:??",
  month =        jun,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2211616.2211618",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Jun 29 18:08:30 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Programmers employ crosscutting concepts, such as
                 design patterns and other programming idioms, when
                 their design ideas cannot be efficiently or effectively
                 modularized in the underlying programming language. As
                 a result, implementations of these crosscutting
                 concepts can be hard to change even when the code is
                 well structured. In this article, we describe Arcum, a
                 system that supports the modular maintenance of
                 crosscutting concepts. Arcum can be used to both check
                 essential constraints of crosscutting concepts and to
                 substitute crosscutting concept implementations with
                 alternative implementations. Arcum is complementary to
                 existing refactoring systems that focus on
                 meaning-preserving program transformations at the
                 programming-language-semantics level, because Arcum
                 focuses on transformations at the conceptual level. We
                 present the underpinnings of the Arcum approach and
                 show how Arcum can be used to address several classical
                 software engineering problems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Fantechi:2012:LVM,
  author =       "Alessandro Fantechi and Stefania Gnesi and Alessandro
                 Lapadula and Franco Mazzanti and Rosario Pugliese and
                 Francesco Tiezzi",
  title =        "A logical verification methodology for
                 service-oriented computing",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jun,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2211616.2211619",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Jun 29 18:08:30 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We introduce a logical verification methodology for
                 checking behavioral properties of service-oriented
                 computing systems. Service properties are described by
                 means of SocL, a branching-time temporal logic that we
                 have specifically designed for expressing in an
                  effective way distinctive aspects of services, such as
                 acceptance of a request, provision of a response,
                 correlation among service requests and responses, etc.
                 Our approach allows service properties to be expressed
                 in such a way that they can be independent of service
                 domains and specifications. We show an instantiation of
                 our general methodology that uses the formal language
                 COWS to conveniently specify services and the expressly
                 developed software tool CMC to assist the user in the
                 task of verifying SocL formulas over service
                 specifications. We demonstrate the feasibility and
                 effectiveness of our methodology by means of the
                 specification and analysis of a case study in the
                 automotive domain.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Strecker:2012:ADC,
  author =       "Jaymie Strecker and Atif M. Memon",
  title =        "Accounting for defect characteristics in evaluations
                 of testing techniques",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jun,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2211616.2211620",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Jun 29 18:08:30 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "As new software-testing techniques are developed,
                 before they can achieve widespread acceptance, their
                 effectiveness at detecting defects must be evaluated.
                 The most common way of evaluating testing techniques is
                 with empirical studies, in which one or more techniques
                 are tried out on software with known defects. However,
                 the defects used can affect the performance of the
                 techniques. To complicate matters, it is not even clear
                 how to effectively describe or characterize defects. To
                 address these problems, this article describes an
                 experiment architecture for empirically evaluating
                 testing techniques which takes both defect and
                 test-suite characteristics into account. As proof of
                 concept, an experiment on GUI-testing techniques is
                 conducted. It provides evidence that the defect
                 characteristics proposed do help explain defect
                 detection, at least for GUI testing, and it explores
                 the relationship between the coverage of defective code
                 and the detection of defects.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Jennings:2012:TPA,
  author =       "Paul Jennings and Arka P. Ghosh and Samik Basu",
  title =        "A two-phase approximation for model checking
                 probabilistic unbounded until properties of
                 probabilistic systems",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jun,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2211616.2211621",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Jun 29 18:08:30 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We have developed a new approximate probabilistic
                 model-checking method for untimed properties in
                 probabilistic systems, expressed in a probabilistic
                 temporal logic (PCTL, CSL). This method, in contrast to
                 the existing ones, does not require the untimed until
                 properties to be bounded a priori, where the bound
                 refers to the number of discrete steps in the system
                 required to verify the until property. The method
                 consists of two phases. In the first phase, a suitable
                 system- and property-dependent bound $ k_0 $ is
                 obtained automatically. In the second phase, the
                 probability of satisfying the $ k_0$-bounded until
                 property is computed as the estimate of the probability
                 of satisfying the original unbounded until property.
                 Both phases require only verification of bounded until
                 properties, which can be effectively performed by
                 simulation-based methods. We prove the correctness of
                 the proposed two-phase method and present its optimized
                 implementation in the widely used PRISM model-checking
                 engine. We compare this implementation with
                 sampling-based model-checking techniques implemented in
                 two tools: PRISM and MRMC. We show that for several
                 models these existing tools fail to compute the result,
                 while the two-phase method successfully computes the
                 result efficiently with respect to time and space.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
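
%%% The second phase described above reduces an unbounded until
%%% property to a $k_0$-bounded one that simulation can handle.  The
%%% sketch below is a plain Monte Carlo estimator for a bounded until
%%% property P(a U<=k b) on a discrete-time chain; the chain, the
%%% bound, and the state labels are hypothetical, and automatically
%%% finding a sound $k_0$ is the part the paper contributes.
%%%
%%%     import random
%%%
%%%     def bounded_until(step, holds_a, holds_b, start, k, runs=20000):
%%%         """Estimate P(a U<=k b) from `start`: b is reached within k
%%%         steps and a holds in every state visited before that."""
%%%         hits = 0
%%%         for _ in range(runs):
%%%             state = start
%%%             for _ in range(k + 1):
%%%                 if holds_b(state):
%%%                     hits += 1
%%%                     break
%%%                 if not holds_a(state):
%%%                     break
%%%                 state = step(state)
%%%             # falling out of either loop counts as "not satisfied"
%%%         return hits / runs
%%%
%%%     # Hypothetical chain: a lazy random walk on 0..5.
%%%     walk = lambda s: min(5, max(0, s + random.choice([-1, 0, 1])))
%%%     print(bounded_until(walk, lambda s: s < 5, lambda s: s == 5, 0, 30))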

@Article{Qi:2012:DAD,
  author =       "Dawei Qi and Abhik Roychoudhury and Zhenkai Liang and
                 Kapil Vaswani",
  title =        "{DARWIN}: an approach to debugging evolving programs",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "3",
  pages =        "19:1--19:??",
  month =        jun,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2211616.2211622",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Fri Jun 29 18:08:30 MDT 2012",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Bugs in programs are often introduced when programs
                 evolve from a stable version to a new version. In this
                 article, we propose a new approach called DARWIN for
                 automatically finding potential root causes of such
                 bugs. Given two programs --- a reference program and a
                 modified program --- and an input that fails on the
                 modified program, our approach uses symbolic execution
                 to automatically synthesize a new input that (a) is
                 very similar to the failing input and (b) does not
                 fail. We find the potential cause(s) of failure by
                 comparing control-flow behavior of the passing and
                 failing inputs and identifying code fragments where the
                 control flows diverge. A notable feature of our
                  approach is that it handles hard-to-explain bugs, such as
                  errors caused by missing code, by pointing to code in the
                 reference program. We have implemented this approach
                 and conducted experiments using several real-world
                 applications, such as the Apache Web server, libPNG (a
                 library for manipulating PNG images), and TCPflow (a
                 program for displaying data sent through TCP
                 connections). In each of these applications, DARWIN was
                 able to localize bugs with high accuracy. Even though
                 these applications contain several thousands of lines
                 of code, DARWIN could usually narrow down the potential
                 root cause(s) to less than ten lines. In addition, we
                 find that the inputs synthesized by DARWIN provide
                 additional value by revealing other undiscovered
                 errors.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
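
%%% DARWIN, as summarized above, localizes root causes by comparing
%%% the control-flow behavior of the failing input with that of a
%%% synthesized passing input.  The function below is a much-simplified
%%% sketch of that comparison step only (the symbolic-execution input
%%% synthesis is the hard part); the trace format is hypothetical.
%%%
%%%     def divergences(passing, failing):
%%%         """Positions where two branch traces disagree; each trace
%%%         is a list of (source location, branch taken) pairs."""
%%%         out = []
%%%         for i, (p, f) in enumerate(zip(passing, failing)):
%%%             if p != f:
%%%                 out.append((i, p, f))
%%%         return out
%%%
%%%     # Hypothetical traces from a passing and a failing run.
%%%     passing = [("parse.c:10", True), ("parse.c:14", False), ("emit.c:7", True)]
%%%     failing = [("parse.c:10", True), ("parse.c:14", True), ("emit.c:9", False)]
%%%     print(divergences(passing, failing))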

@Article{Holmes:2012:SPS,
  author =       "Reid Holmes and Robert J. Walker",
  title =        "Systematizing pragmatic software reuse",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "4",
  pages =        "20:1--20:??",
  month =        nov,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2377656.2377657",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 2 06:46:47 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Many software reuse tasks involve reusing source code
                 that was not designed in a manner conducive to those
                 tasks, requiring that ad hoc modifications be applied.
                 Such pragmatic reuse tasks are a reality in disciplined
                 industrial practice; they arise for a variety of
                 organizational and technical reasons. To investigate a
                 pragmatic reuse task, a developer must navigate
                 through, and reason about, source code dependencies in
                 order to identify program elements that are relevant to
                 the task and to decide how those elements should be
                 reused. The developer must then convert his mental
                 model of the task into a set of actions that he can
                 perform. These steps are poorly supported by modern
                 development tools and practices. We provide a model for
                 the process involved in performing a pragmatic reuse
                 task, including the need to capture (mentally or
                 otherwise) the developer's decisions about how each
                 program element should be treated: this is a
                 pragmatic-reuse plan. We provide partial support for
                 this model via a tool suite, called Gilligan; other
                 parts of the model are supported via standard IDE
                 tools. Using a pragmatic-reuse plan, Gilligan can
                 semiautomatically transform the selected source code
                 from its originating system and integrate it into the
                 developer's system. We have evaluated Gilligan through
                 a series of case studies and experiments (each
                 involving industrial developers) using a variety of
                 source systems and tasks; we report in particular on a
                 previously unpublished, formal experiment. The results
                 show that pragmatic-reuse plans are a robust metaphor
                 for capturing pragmatic reuse intent and that, relative
                 to standard IDE tools, Gilligan can (1) significantly
                 decrease the time that developers require to perform
                 pragmatic reuse tasks, (2) increase the likelihood that
                 developers will successfully complete pragmatic reuse
                 tasks, (3) decrease the time required by developers to
                 identify infeasible reuse tasks, and (4) improve
                 developers' sense of their ability to manage the risk
                 in such tasks.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Cai:2012:FMA,
  author =       "Yuanfang Cai and Kevin Sullivan",
  title =        "A formal model for automated software modularity and
                 evolvability analysis",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "4",
  pages =        "21:1--21:??",
  month =        nov,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2377656.2377658",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 2 06:46:47 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Neither the nature of modularity in software design,
                 characterized as a property of the structure of
                  dependencies among design decisions, nor its economic
                  value is adequately understood. One basic problem
                 is that we do not even have a sufficiently clear
                 definition of what it means for one design decision to
                 depend on another. The main contribution of this work
                 is one possible mathematically precise definition of
                 dependency based on an augmented constraint network
                 model. The model provides an end-to-end account of the
                 connection between modularity and its value in terms of
                 options to make adaptive changes in uncertain and
                 changing design spaces. We demonstrate the validity and
                 theoretical utility of the model, showing that it is
                 consistent with, and provides new insights into,
                 several previously published results in design
                 theory.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Cimatti:2012:VRH,
  author =       "Alessandro Cimatti and Marco Roveri and Angelo Susi
                 and Stefano Tonetta",
  title =        "Validation of requirements for hybrid systems: a
                 formal approach",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "4",
  pages =        "22:1--22:??",
  month =        nov,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2377656.2377659",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 2 06:46:47 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Flaws in requirements may have unacceptable
                 consequences in the development of safety-critical
                 applications. Formal approaches may help with a deep
                 analysis that takes care of the precise semantics of
                 the requirements. However, the proposed solutions often
                 disregard the problem of integrating the formalization
                 with the analysis, and the underlying logical framework
                 lacks either expressive power, or automation. We
                 propose a new, comprehensive approach for the
                 validation of functional requirements of hybrid
                 systems, where discrete components and continuous
                 components are tightly intertwined. The proposed
                  solution makes it possible to tackle problems of
                  conversion from informal to formal, traceability,
                  automation, user acceptance, and scalability. We
                  build on a new language, Othello, which is expressive
                  enough to represent various domains of interest, yet
                  allows efficient procedures for checking satisfiability.
                 Around this, we propose a structured methodology where:
                 informal requirements are fragmented and categorized
                 according to their role; each fragment is formalized
                 based on its category; specialized formal analysis
                 techniques, optimized for requirements analysis, are
                 finally applied. The approach was the basis of an
                 industrial project aiming at the validation of the
                 European Train Control System (ETCS) requirements
                 specification. During the project a realistic subset of
                 the ETCS specification was formalized and analyzed. The
                 approach was positively assessed by domain experts.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Poshyvanyk:2012:CLU,
  author =       "Denys Poshyvanyk and Malcom Gethers and Andrian
                 Marcus",
  title =        "Concept location using formal concept analysis and
                 information retrieval",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "4",
  pages =        "23:1--23:??",
  month =        nov,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2377656.2377660",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 2 06:46:47 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The article addresses the problem of concept location
                 in source code by proposing an approach that combines
                 Formal Concept Analysis and Information Retrieval. In
                 the proposed approach, Latent Semantic Indexing, an
                 advanced Information Retrieval approach, is used to map
                 textual descriptions of software features or bug
                 reports to relevant parts of the source code, presented
                 as a ranked list of source code elements. Given the
                 ranked list, the approach selects the most relevant
                 attributes from the best ranked documents, clusters the
                 results, and presents them as a concept lattice,
                 generated using Formal Concept Analysis. The approach
                 is evaluated through a large case study on concept
                 location in the source code on six open-source systems,
                 using several hundred features and bugs. The empirical
                 study focuses on the analysis of various configurations
                 of the generated concept lattices and the results
                 indicate that our approach is effective in organizing
                 different concepts and their relationships present in
                 the subset of the search results. In consequence, the
                 proposed concept location method has been shown to
                 outperform a standalone Information Retrieval based
                 concept location technique by reducing the number of
                 irrelevant search results across all the systems and
                 lattice configurations evaluated, potentially reducing
                 the programmers' effort during software maintenance
                 tasks involving concept location.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
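
%%% The approach above organizes top-ranked search results into a
%%% concept lattice built by Formal Concept Analysis.  The sketch
%%% below enumerates the formal concepts of a tiny object-attribute
%%% context by closing every intersection of attribute sets; it is
%%% exponential and meant only to show what a formal concept is, and
%%% the context (code elements vs. matched query terms) is
%%% hypothetical.
%%%
%%%     from itertools import combinations
%%%
%%%     def concepts(ctx):
%%%         """All (extent, intent) pairs of a small context given as
%%%         a dict mapping each object to its attribute set."""
%%%         objs = list(ctx)
%%%         found = set()
%%%         for r in range(len(objs) + 1):
%%%             for group in combinations(objs, r):
%%%                 if group:
%%%                     intent = set.intersection(*(ctx[o] for o in group))
%%%                 else:
%%%                     intent = set.union(*ctx.values())
%%%                 extent = frozenset(o for o in objs if intent <= ctx[o])
%%%                 found.add((extent, frozenset(intent)))
%%%         return found
%%%
%%%     # Hypothetical results: code elements -> query terms they match.
%%%     ctx = {"Parser.parse": {"parse", "token"},
%%%            "Lexer.next":   {"token"},
%%%            "Cache.get":    {"cache"}}
%%%     for extent, intent in sorted(concepts(ctx), key=lambda c: -len(c[1])):
%%%         print(sorted(extent), sorted(intent))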

@Article{Meneely:2012:VSM,
  author =       "Andrew Meneely and Ben Smith and Laurie Williams",
  title =        "Validating software metrics: a spectrum of
                 philosophies",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "4",
  pages =        "24:1--24:??",
  month =        nov,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2377656.2377661",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 2 06:46:47 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Context. Researchers proposing a new metric have the
                 burden of proof to demonstrate to the research
                 community that the metric is acceptable in its intended
                 use. This burden of proof is provided through the
                 multi-faceted, scientific, and objective process of
                 software metrics validation. Over the last 40 years,
                 however, researchers have debated what constitutes a
                 ``valid'' metric. Aim. The debate over what constitutes
                 a valid metric centers on software metrics validation
                 criteria. The objective of this article is to guide
                 researchers in making sound contributions to the field
                 of software engineering metrics by providing a
                 practical summary of the metrics validation criteria
                 found in the academic literature. Method. We conducted
                 a systematic literature review that began with 2,288
                 papers and ultimately focused on 20 papers. After
                 extracting 47 unique validation criteria from these 20
                 papers, we performed a comparative analysis to explore
                 the relationships amongst the criteria. Results. Our 47
                 validation criteria represent a diverse view of what
                 constitutes a valid metric. We present an analysis of
                 the criteria's categorization, conflicts, common
                 themes, and philosophical motivations behind the
                 validation criteria. Conclusions. Although the 47
                 validation criteria are not conflict-free, the
                 diversity of motivations and philosophies behind the
                 validation criteria indicates that metrics validation
                 is complex. Researchers proposing new metrics should
                 consider the applicability of the validation criteria
                 in terms of our categorization and analysis. Rather
                 than arbitrarily choosing validation criteria for each
                 metric, researchers should choose criteria that can
                 confirm that the metric is appropriate for its intended
                 use. We conclude that metrics validation criteria
                 provide answers to questions that researchers have
                 about the merits and limitations of a metric.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Kiezun:2012:HSW,
  author =       "Adam Kiezun and Vijay Ganesh and Shay Artzi and Philip
                 J. Guo and Pieter Hooimeijer and Michael D. Ernst",
  title =        "{HAMPI}: a solver for word equations over strings,
                 regular expressions, and context-free grammars",
  journal =      j-TOSEM,
  volume =       "21",
  number =       "4",
  pages =        "25:1--25:??",
  month =        nov,
  year =         "2012",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2377656.2377662",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 2 06:46:47 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Many automatic testing, analysis, and verification
                 techniques for programs can be effectively reduced to a
                 constraint-generation phase followed by a
                 constraint-solving phase. This separation of concerns
                 often leads to more effective and maintainable software
                 reliability tools. The increasing efficiency of
                 off-the-shelf constraint solvers makes this approach
                 even more compelling. However, there are few effective
                 and sufficiently expressive off-the-shelf solvers for
                 string constraints generated by analysis of
                 string-manipulating programs, so researchers end up
                 implementing their own ad-hoc solvers. To fulfill this
                 need, we designed and implemented Hampi, a solver for
                 string constraints over bounded string variables. Users
                 of Hampi specify constraints using regular expressions,
                 context-free grammars, equality between string terms,
                 and typical string operations such as concatenation and
                 substring extraction. Hampi then finds a string that
                 satisfies all the constraints or reports that the
                 constraints are unsatisfiable. We demonstrate Hampi's
                 expressiveness and efficiency by applying it to program
                 analysis and automated testing. We used Hampi in static
                 and dynamic analyses for finding SQL injection
                 vulnerabilities in Web applications with hundreds of
                 thousands of lines of code. We also used Hampi in the
                 context of automated bug finding in C programs using
                 dynamic systematic testing (also known as concolic
                 testing). We then compared Hampi with another string
                 solver, CFGAnalyzer, and show that Hampi is several
                 times faster. Hampi's source code, documentation, and
                 experimental data are available at
                 \path=http://people.csail.mit.edu/akiezun/hampi=",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
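
%%% Hampi, per the abstract above, works over bounded string
%%% variables, which makes satisfiability decidable by (in principle)
%%% finite search.  The sketch below brute-forces such a bounded
%%% constraint, mixing regular-expression membership with a substring
%%% condition, to illustrate the shape of the problem Hampi solves; it
%%% is not Hampi's algorithm, and the constraint is hypothetical.
%%%
%%%     import re
%%%     from itertools import product
%%%
%%%     def solve(alphabet, length, constraints):
%%%         """Brute-force a string of fixed length over `alphabet`
%%%         satisfying every constraint (bounded, hence finite)."""
%%%         for chars in product(alphabet, repeat=length):
%%%             s = "".join(chars)
%%%             if all(c(s) for c in constraints):
%%%                 return s
%%%         return None  # unsatisfiable at this bound
%%%
%%%     # Hypothetical constraint: v in (ab)+ and v contains "ba".
%%%     print(solve("ab", 4, [lambda s: re.fullmatch(r"(ab)+", s),
%%%                           lambda s: "ba" in s]))   # -> "abab"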

@Article{Notkin:2013:ELB,
  author =       "David Notkin",
  title =        "Editorial --- looking back",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2431201",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Rosenblum:2013:ELF,
  author =       "David S. Rosenblum",
  title =        "Editorial --- looking forward",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2431202",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Sun:2013:MVH,
  author =       "Jun Sun and Yang Liu and Jin Song Dong and Yan Liu and
                 Ling Shi and {\'E}tienne Andr{\'e}",
  title =        "Modeling and verifying hierarchical real-time systems
                 using stateful timed {CSP}",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2430537",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Modeling and verifying complex real-time systems are
                 challenging research problems. The de facto approach is
                 based on Timed Automata, which are finite state
                 automata equipped with clock variables. Timed Automata
                  are deficient in modeling complex, hierarchical systems.
                 In this work, we propose a language called Stateful
                 Timed CSP and an automated approach for verifying
                 Stateful Timed CSP models. Stateful Timed CSP is based
                 on Timed CSP and is capable of specifying hierarchical
                 real-time systems. Through dynamic zone abstraction,
                 finite-state zone graphs can be generated automatically
                 from Stateful Timed CSP models, which are subject to
                 model checking. Like Timed Automata, Stateful Timed CSP
                 models suffer from Zeno runs, that is, system runs that
                 take infinitely many steps within finite time. Unlike
                 Timed Automata, model checking with non-Zenoness in
                 Stateful Timed CSP can be achieved based on the zone
                 graphs. We extend the PAT model checker to support
                 system modeling and verification using Stateful Timed
                 CSP and show its usability/scalability via verification
                 of real-world systems.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Demsky:2013:VSF,
  author =       "Brian Demsky and Patrick Lam",
  title =        "Views: {Synthesizing} fine-grained concurrency
                 control",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2430538",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Fine-grained locking is often necessary to increase
                 concurrency. Correctly implementing fine-grained
                 locking with today's concurrency primitives can be
                  challenging: race conditions often plague programs with
                 sophisticated locking schemes. We present views, a new
                 approach to concurrency control. Views ease the task of
                 implementing sophisticated locking schemes and provide
                 static checks to automatically detect many data races.
                 A view of an object declares a partial interface,
                 consisting of fields and methods, to the object that
                 the view protects. A view also contains an
                 incompatibility declaration, which lists views that may
                 not be simultaneously held by other threads. A set of
                 view annotations specify which code regions hold a view
                 of an object. Our view compiler performs simple static
                 checks that identify many data races. We pair the basic
                 approach with an inference algorithm that can infer
                 view incompatibility specifications for many
                 applications. We have ported four benchmark
                 applications to use views: portions of Vuze, a
                 BitTorrent client; Mailpuccino, a graphical email
                 client; jphonelite, a VoIP softphone implementation;
                 and TupleSoup, a database. Our experience indicates
                 that views are easy to use, make implementing
                 sophisticated locking schemes simple, and can help
                 eliminate concurrency bugs. We have evaluated the
                 performance of a view implementation of a red-black
                 tree and found that views can significantly improve
                 performance over that of the lock-based
                 implementation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
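
%%% The central declaration in the views approach above is an
%%% incompatibility list stating which views may not be held
%%% simultaneously; the paper enforces much of this statically.  The
%%% sketch below is a dynamic analogue only: a guard that rejects
%%% acquiring a view while an incompatible one is held.  The view
%%% names and the policy are hypothetical.
%%%
%%%     import threading
%%%
%%%     class ViewGuard:
%%%         """Reject acquisition of a view while an incompatible
%%%         view of the same object is currently held."""
%%%         def __init__(self, incompatible):
%%%             self.incompatible = incompatible   # view -> excluded views
%%%             self.held = {}                     # view -> holder count
%%%             self.lock = threading.Lock()
%%%
%%%         def acquire(self, view):
%%%             with self.lock:
%%%                 for other, n in self.held.items():
%%%                     if n and other in self.incompatible.get(view, set()):
%%%                         raise RuntimeError(f"{view} conflicts with {other}")
%%%                 self.held[view] = self.held.get(view, 0) + 1
%%%
%%%         def release(self, view):
%%%             with self.lock:
%%%                 self.held[view] -= 1
%%%
%%%     # Hypothetical policy on a shared buffer: readers exclude writers.
%%%     guard = ViewGuard({"write": {"read", "write"}, "read": {"write"}})
%%%     guard.acquire("read"); guard.acquire("read")   # compatible
%%%     try:
%%%         guard.acquire("write")                     # rejected
%%%     except RuntimeError as err:
%%%         print(err)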

@Article{Yue:2013:FTU,
  author =       "Tao Yue and Lionel C. Briand and Yvan Labiche",
  title =        "Facilitating the transition from use case models to
                 analysis models: Approach and experiments",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2430539",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Use case modeling, including use case diagrams and use
                 case specifications (UCSs), is commonly applied to
                 structure and document requirements. UCSs are usually
                 structured but unrestricted textual documents complying
                 with a certain use case template. However, because Use
                 Case Models (UCMods) remain essentially textual,
                 ambiguity is inevitably introduced. In this article, we
                 propose a use case modeling approach, called Restricted
                 Use Case Modeling (RUCM), which is composed of a set of
                 well-defined restriction rules and a modified use case
                 template. The goal is two-fold: (1) restrict the way
                 users can document UCSs in order to reduce ambiguity
                 and (2) facilitate the manual derivation of initial
                 analysis models which, when using the Unified Modeling
                 Language (UML), are typically composed of class
                 diagrams, sequence diagrams, and possibly other types
                 of diagrams. Though the proposed restriction rules and
                 template are based on a clear rationale, two main
                 questions need to be investigated. First, do users find
                 them too restrictive or impractical in certain
                 situations? In other words, can users express the same
                 requirements with RUCM as with unrestricted use cases?
                 Second, do the rules and template have a positive,
                 significant impact on the quality of the constructed
                 analysis models? To investigate these questions, we
                 performed and report on two controlled experiments,
                 which evaluate the restriction rules and use case
                 template in terms of (1) whether they are easy to apply
                 while developing UCMods and facilitate the
                 understanding of UCSs, and (2) whether they help users
                 manually derive higher quality analysis models than
                 what can be generated when they are not used, in terms
                 of correctness, completeness, and redundancy. This
                 article reports on the first controlled experiments
                 that evaluate the applicability of restriction rules on
                 use case modeling and their impact on the quality of
                 analysis models. The measures we have defined to
                 characterize restriction rules and the quality of
                 analysis class and sequence diagrams can be reused to
                 perform similar experiments in the future, either with
                 RUCM or other approaches. Results show that the
                 restriction rules are overall easy to apply and that
                  RUCM results in significant improvements over
                 traditional approaches (i.e., with standard templates,
                 without restrictions) in terms of class correctness and
                 class diagram completeness, message correctness and
                 sequence diagram completeness, and understandability of
                 UCSs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Hemmati:2013:ASM,
  author =       "Hadi Hemmati and Andrea Arcuri and Lionel Briand",
  title =        "Achieving scalable model-based testing through test
                 case diversity",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2430540",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The increase in size and complexity of modern software
                 systems requires scalable, systematic, and automated
                 testing approaches. Model-based testing (MBT), as a
                 systematic and automated test case generation
                 technique, is being successfully applied to verify
                 industrial-scale systems and is supported by commercial
                 tools. However, scalability is still an open issue for
                 large systems, as in practice there are limits to the
                 amount of testing that can be performed in industrial
                 contexts. Even with standard coverage criteria, the
                 resulting test suites generated by MBT techniques can
                 be very large and expensive to execute, especially for
                 system level testing on real deployment platforms and
                 network facilities. Therefore, a scalable MBT technique
                 should be flexible regarding the size of the generated
                  test suites and should be easily adapted to fit
                 resource and time constraints. Our approach is to
                 select a subset of the generated test suite in such a
                 way that it can be realistically executed and analyzed
                 within the time and resource constraints, while
                 preserving the fault revealing power of the original
                 test suite to a maximum extent. In this article, to
                 address this problem, we introduce a family of
                 similarity-based test case selection techniques for
                 test suites generated from state machines. We evaluate
                 320 different similarity-based selection techniques and
                 then compare the effectiveness of the best
                 similarity-based selection technique with other common
                 selection techniques in the literature. The results
                 based on two industrial case studies, in the domain of
                 embedded systems, show significant benefits and a large
                 improvement in performance when using a
                 similarity-based approach. We complement these analyses
                 with further studies on the scalability of the
                 technique and the effects of failure rate on its
                 effectiveness. We also propose a method to identify
                 optimal tradeoffs between the number of test cases to
                 run and fault detection.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
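
The following is a minimal, hypothetical Python sketch of the
similarity-based selection idea, assuming each test case is abstracted
to a set of transition identifiers and that Jaccard distance measures
similarity; the article evaluates 320 encoding/distance/algorithm
combinations, none of which is reproduced exactly here.

    # Hypothetical sketch of similarity-based test selection (not the
    # article's implementation).  Tests are sets of transition ids;
    # diversity is maximized greedily under a fixed budget.
    def jaccard_distance(a, b):
        # 1 - |a & b| / |a | b|; two empty sets get distance 0.
        union = a | b
        return 1.0 - len(a & b) / len(union) if union else 0.0

    def select_diverse(tests, budget):
        # Greedy farthest-point selection: repeatedly add the test
        # whose nearest already-selected neighbor is farthest away.
        selected = [tests[0]]
        remaining = list(tests[1:])
        while remaining and len(selected) < budget:
            best = max(remaining,
                       key=lambda t: min(jaccard_distance(t, s)
                                         for s in selected))
            selected.append(best)
            remaining.remove(best)
        return selected

    # Example: four abstract test cases, keep the two most diverse.
    tests = [frozenset({1, 2, 3}), frozenset({1, 2, 4}),
             frozenset({7, 8, 9}), frozenset({1, 2, 3, 4})]
    print(select_diverse(tests, 2))  # picks {1, 2, 3} and {7, 8, 9}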

@Article{Monperrus:2013:DMM,
  author =       "Martin Monperrus and Mira Mezini",
  title =        "Detecting missing method calls as violations of the
                 majority rule",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "7:1--7:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2430541",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "When using object-oriented frameworks it is easy to
                 overlook certain important method calls that are
                 required at particular places in code. In this article,
                 we provide a comprehensive set of empirical facts on
                 this problem, starting from traces of missing method
                 calls in a bug repository. We propose a new system that
                 searches for missing method calls in software based on
                 the other method calls that are observable. Our key
                 insight is that the voting theory concept of majority
                 rule holds for method calls: a call is likely to be
                 missing if there is a majority of similar pieces of
                 code where this call is present. The evaluation shows
                  that the system predictions go beyond missing method
                 calls and often reveal different kinds of code smells
                 (e.g., violations of API best practices).",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
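
The majority-rule insight lends itself to a compact illustration.  The
Python sketch below is a hypothetical simplification: snippets are
pre-grouped by a context key (e.g., the static type they use), and a
call is reported as missing from a snippet when at least a threshold
fraction of its peers make that call.

    from collections import Counter

    def missing_calls(snippets, threshold=0.9):
        # snippets: list of (context, set_of_called_methods) pairs.
        by_context = {}
        for context, calls in snippets:
            by_context.setdefault(context, []).append(calls)
        report = []
        for context, call_sets in by_context.items():
            counts = Counter(c for calls in call_sets for c in calls)
            majority = {c for c, n in counts.items()
                        if n / len(call_sets) >= threshold}
            for calls in call_sets:
                for missing in sorted(majority - calls):
                    report.append((context, missing))
        return report

    snippets = [("Button", {"setText", "addListener"}),
                ("Button", {"setText", "addListener"}),
                ("Button", {"setText"})]
    print(missing_calls(snippets, threshold=0.6))
    # [('Button', 'addListener')]: the third snippet lacks the call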

@Article{Huang:2013:SPA,
  author =       "Jeff Huang and Jinguo Zhou and Charles Zhang",
  title =        "Scaling predictive analysis of concurrent programs by
                 removing trace redundancy",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2430542",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Predictive trace analysis (PTA) of concurrent programs
                 is powerful in finding concurrency bugs unseen in past
                 program executions. Unfortunately, existing PTA
                 solutions face considerable challenges in scaling to
                 large traces. In this article, we identify that a large
                 percentage of events in the trace are redundant for
                 presenting useful analysis results to the end user.
                 Removing them from the trace can significantly improve
                 the scalability of PTA without affecting the quality of
                 the results. We present a trace redundancy theorem that
                 specifies a redundancy criterion and the soundness
                 guarantee that the PTA results are preserved after
                 removing the redundancy. Based on this criterion, we
                 design and implement TraceFilter, an efficient
                 algorithm that automatically removes redundant events
                 from a trace for the PTA of general concurrency access
                 anomalies. We evaluated TraceFilter on a set of popular
                 concurrent benchmarks as well as real world large
                 server programs. Our experimental results show that
                 TraceFilter is able to significantly improve the
                 scalability of PTA by orders of magnitude, without
                 impairing the analysis result.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
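
The article's redundancy criterion is formal and not reproduced here,
but a far simpler cousin conveys the flavor: events on variables that
only one thread ever touches cannot participate in the anomalies PTA
looks for, so they can be dropped.  A hypothetical Python sketch:

    from collections import defaultdict

    def filter_trace(trace):
        # trace: list of (thread, operation, variable) events.  Keep
        # only events on variables accessed by more than one thread.
        threads = defaultdict(set)
        for thread, _, var in trace:
            threads[var].add(thread)
        return [e for e in trace if len(threads[e[2]]) > 1]

    trace = [("t1", "write", "x"), ("t2", "read", "x"),
             ("t1", "write", "y")]
    print(filter_trace(trace))  # the thread-local write to y is gone

Thread-locality is a classical sound reduction, but it is much weaker
than the criterion TraceFilter actually applies.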

@Article{Dippolito:2013:SNE,
  author =       "Nicol{\'a}s D'ippolito and Victor Braberman and Nir
                 Piterman and Sebasti{\'a}n Uchitel",
  title =        "Synthesizing nonanomalous event-based controllers for
                 liveness goals",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "1",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430536.2430543",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Mar 2 09:22:48 MST 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We present SGR(1), a novel synthesis technique and
                 methodological guidelines for automatically
                 constructing event-based behavior models. Our approach
                 works for an expressive subset of liveness properties,
                 distinguishes between controlled and monitored actions,
                 and differentiates system goals from environment
                 assumptions. We show that assumptions must be modeled
                 carefully in order to avoid synthesizing anomalous
                 behavior models. We characterize nonanomalous models
                 and propose assumption compatibility, a sufficient
                 condition, as a methodological guideline.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Zhang:2013:CDC,
  author =       "Wei Zhang and Chong Sun and Junghee Lim and Shan Lu
                 and Thomas Reps",
  title =        "{ConMem}: Detecting Crash-Triggering Concurrency Bugs
                 through an Effect-Oriented Approach",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "2",
  pages =        "10:1--10:??",
  month =        mar,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430545.2430546",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Mar 27 05:43:25 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Multicore technology is making concurrent programs
                 increasingly pervasive. Unfortunately, it is difficult
                 to deliver reliable concurrent programs, because of the
                 huge and nondeterministic interleaving space. In
                 reality, without the resources to thoroughly check the
                 interleaving space, critical concurrency bugs can slip
                 into production versions and cause failures in the
                 field. Approaches to making the best use of the limited
                 resources and exposing severe concurrency bugs before
                 software release would be desirable. Unlike previous
                 work that focuses on bugs caused by specific
                 interleavings (e.g., races and atomicity violations),
                 this article targets concurrency bugs that result in
                 one type of severe effect: program crashes. Our study
                 of the error-propagation process of real-world
                 concurrency bugs reveals a common pattern (50\% in our
                 nondeadlock concurrency bug set) that is highly
                 correlated with program crashes. We call this pattern
                 concurrency-memory bugs: buggy interleavings directly
                 cause memory bugs (NULL-pointer-dereferences,
                 dangling-pointers, buffer-overflows,
                 uninitialized-reads) on shared memory objects. Guided
                 by this study, we built ConMem to monitor program
                 execution, analyze memory accesses and
                 synchronizations, and predictively detect these common
                 and severe concurrency-memory bugs. We also built a
                  validator, ConMem-v, to automatically prune false
                 positives by enforcing potential bug-triggering
                 interleavings. We evaluated ConMem using 7 open-source
                 programs with 10 real-world concurrency bugs. ConMem
                 detects more tested bugs (9 out of 10 bugs) than a
                 lock-set-based race detector and an
                 unserializable-interleaving detector, which detect 4
                 and 6 bugs, respectively, with a false-positive rate
                 about one tenth of the compared tools. ConMem-v further
                  prunes out all the false positives. ConMem has
                  reasonable overhead, suitable for development usage.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Rosa:2013:BPM,
  author =       "Marcello {La Rosa} and Marlon Dumas and Reina Uba and
                 Remco Dijkman",
  title =        "Business Process Model Merging: An Approach to
                 Business Process Consolidation",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "2",
  pages =        "11:1--11:??",
  month =        mar,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430545.2430547",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Mar 27 05:43:25 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "This article addresses the problem of constructing
                 consolidated business process models out of collections
                 of process models that share common fragments. The
                 article considers the construction of unions of
                  multiple models (called merged models) as well as
                  intersections (called digests). Merged models are
                 intended for analysts who wish to create a model that
                 subsumes a collection of process models --- typically
                 representing variants of the same underlying process
                 --- with the aim of replacing the variants with the
                 merged model. Digests, on the other hand, are intended
                 for analysts who wish to identify the most recurring
                 fragments across a collection of process models, so
                 that they can focus their efforts on optimizing these
                 fragments. The article presents an algorithm for
                 computing merged models and an algorithm for extracting
                 digests from a merged model. The merging and digest
                 extraction algorithms have been implemented and tested
                 against collections of process models taken from
                 multiple application domains. The tests show that the
                 merging algorithm produces compact models and scales up
                 to process models containing hundreds of nodes.
                 Furthermore, a case study conducted in a large
                 insurance company has demonstrated the usefulness of
                 the merging and digest extraction operators in a
                 practical setting.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
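
To make the union/intersection distinction concrete, here is a toy
Python sketch over process models reduced to plain sets of edges; real
process models are annotated graphs, and the article's algorithms
additionally maximize fragment sharing, which this does not attempt.

    from collections import Counter

    def merge(models):
        # Union of all edges: a model that subsumes every variant.
        merged = set()
        for m in models:
            merged |= m
        return merged

    def digest(models, min_freq=1.0):
        # Edges occurring in at least min_freq of the variants.
        counts = Counter(e for m in models for e in m)
        return {e for e, n in counts.items()
                if n / len(models) >= min_freq}

    v1 = {("check claim", "approve"), ("approve", "pay")}
    v2 = {("check claim", "approve"), ("approve", "reject")}
    print(merge([v1, v2]))   # all three edges
    print(digest([v1, v2]))  # only the shared edge survives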

@Article{Zheng:2013:PRP,
  author =       "Zibin Zheng and Michael R. Lyu",
  title =        "Personalized Reliability Prediction of {Web}
                 Services",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "2",
  pages =        "12:1--12:??",
  month =        mar,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430545.2430548",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Mar 27 05:43:25 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Service Oriented Architecture (SOA) is a
                 business-centric IT architectural approach for building
                 distributed systems. Reliability of service-oriented
                 systems heavily depends on the remote Web services as
                 well as the unpredictable Internet connections.
                 Designing efficient and effective reliability
                 prediction approaches of Web services has become an
                 important research issue. In this article, we propose
                 two personalized reliability prediction approaches of
                 Web services, that is, neighborhood-based approach and
                 model-based approach. The neighborhood-based approach
                 employs past failure data of similar neighbors (either
                 service users or Web services) to predict the Web
                 service reliability. On the other hand, the model-based
                 approach fits a factor model based on the available Web
                  service failure data and uses this factor model to make
                 further reliability prediction. Extensive experiments
                 are conducted with our real-world Web service datasets,
                  which include about 23 million invocation results on
                 more than 3,000 real-world Web services. The
                 experimental results show that our proposed reliability
                 prediction approaches obtain better reliability
                 prediction accuracy than other competing approaches.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
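
The neighborhood-based approach is in the spirit of collaborative
filtering.  Below is a minimal Python sketch, assuming a
user-by-service matrix of observed failure rates with None for
unobserved pairs; the similarity and weighting used here are generic
placeholders, not necessarily those of the article.

    def predict(matrix, user, service, k=3):
        # Predict matrix[user][service] from the k most similar users
        # who have observed that service.
        def similarity(u, v):
            common = [s for s in range(len(matrix[u]))
                      if matrix[u][s] is not None
                      and matrix[v][s] is not None]
            if not common:
                return 0.0
            diff = sum(abs(matrix[u][s] - matrix[v][s])
                       for s in common)
            return 1.0 / (1.0 + diff / len(common))
        neighbors = sorted(
            (v for v in range(len(matrix))
             if v != user and matrix[v][service] is not None),
            key=lambda v: similarity(user, v), reverse=True)[:k]
        total = sum(similarity(user, v) for v in neighbors)
        if total == 0.0:
            return None
        return sum(similarity(user, v) * matrix[v][service]
                   for v in neighbors) / total

    m = [[0.01, 0.02, None],
         [0.01, 0.03, 0.10],
         [0.50, 0.60, 0.90]]
    print(predict(m, user=0, service=2))  # pulled toward user 1's 0.10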

@Article{Walkinshaw:2013:ACS,
  author =       "Neil Walkinshaw and Kirill Bogdanov",
  title =        "Automated Comparison of State-Based Software Models in
                 Terms of Their Language and Structure",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "2",
  pages =        "13:1--13:??",
  month =        mar,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430545.2430549",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Mar 27 05:43:25 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "State machines capture the sequential behavior of
                 software systems. Their intuitive visual notation,
                 along with a range of powerful verification and testing
                 techniques render them an important part of the
                 model-driven software engineering process. There are
                 several situations that require the ability to identify
                 and quantify the differences between two state machines
                  (e.g., the accuracy of state machine
                 inference techniques is measured by the similarity of a
                 reverse-engineered model to its reference model). State
                 machines can be compared from two complementary
                 perspectives: (1) In terms of their language --- the
                 externally observable sequences of events that are
                 permitted or not, and (2) in terms of their structure
                 --- the actual states and transitions that govern the
                 behavior. This article describes two techniques to
                 compare models in terms of these two perspectives. It
                 shows how the difference can be quantified and measured
                 by adapting existing binary classification performance
                 measures for the purpose. The approaches have been
                 implemented by the authors, and the implementation is
                 openly available. Feasibility is demonstrated via a
                  case study comparing two real state machine inference
                 approaches. Scalability and accuracy are assessed
                 experimentally with respect to a large collection of
                 randomly synthesized models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
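
The language-based comparison can be approximated by sampling, as in
this hypothetical Python sketch: random event sequences are classified
by both machines, and machine B is scored against machine A with
standard binary-classification measures.  The article adapts such
measures more carefully and also covers the structural perspective,
which sampling does not.

    import random

    def language_scores(accepts_a, accepts_b, alphabet,
                        samples=10000, max_len=8):
        # Treat machine A as ground truth and score B's verdicts.
        tp = fp = fn = tn = 0
        for _ in range(samples):
            seq = [random.choice(alphabet)
                   for _ in range(random.randint(1, max_len))]
            a, b = accepts_a(seq), accepts_b(seq)
            tp += a and b
            fp += (not a) and b
            fn += a and (not b)
            tn += (not a) and (not b)
        precision = tp / (tp + fp) if tp + fp else 0.0
        recall = tp / (tp + fn) if tp + fn else 0.0
        return precision, recall

    # Example: A accepts sequences ending in "stop"; B accepts all.
    p, r = language_scores(lambda s: s[-1] == "stop",
                           lambda s: True, ["go", "stop"])
    print(p, r)  # precision around 0.5, recall exactly 1.0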

@Article{Fleming:2013:IFT,
  author =       "Scott D. Fleming and Chris Scaffidi and David
                 Piorkowski and Margaret Burnett and Rachel Bellamy and
                 Joseph Lawrance and Irwin Kwan",
  title =        "An Information Foraging Theory Perspective on Tools
                 for Debugging, Refactoring, and Reuse Tasks",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "2",
  pages =        "14:1--14:??",
  month =        mar,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430545.2430551",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Mar 27 05:43:25 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Theories of human behavior are an important but
                 largely untapped resource for software engineering
                 research. They facilitate understanding of human
                 developers' needs and activities, and thus can serve as
                 a valuable resource to researchers designing software
                 engineering tools. Furthermore, theories abstract
                 beyond specific methods and tools to fundamental
                 principles that can be applied to new situations.
                 Toward filling this gap, we investigate the
                 applicability and utility of Information Foraging
                 Theory (IFT) for understanding information-intensive
                 software engineering tasks, drawing upon literature in
                 three areas: debugging, refactoring, and reuse. In
                 particular, we focus on software engineering tools that
                 aim to support information-intensive activities, that
                 is, activities in which developers spend time seeking
                 information. Regarding applicability, we consider
                 whether and how the mathematical equations within IFT
                 can be used to explain why certain existing tools have
                 proven empirically successful at helping software
                 engineers. Regarding utility, we applied an IFT
                 perspective to identify recurring design patterns in
                 these successful tools, and consider what opportunities
                 for future research are revealed by our IFT
                 perspective.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Chaki:2013:VAI,
  author =       "Sagar Chaki and Christian Schallhart and Helmut
                 Veith",
  title =        "Verification across Intellectual Property Boundaries",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "2",
  pages =        "15:1--15:??",
  month =        mar,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2430545.2430550",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Mar 27 05:43:25 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In many industries, the importance of software
                 components provided by third-party suppliers is
                 steadily increasing. As the suppliers seek to secure
                 their intellectual property (IP) rights, the customer
                 usually has no direct access to the suppliers' source
                 code, and is able to enforce the use of verification
                 tools only by legal requirements. In turn, the supplier
                 has no means to convince the customer about successful
                 verification without revealing the source code. This
                 article presents an approach to resolve the conflict
                 between the IP interests of the supplier and the
                 quality interests of the customer. We introduce a
                 protocol in which a dedicated server (called the
                 ``amanat'') is controlled by both parties: the customer
                 controls the verification task performed by the amanat,
                 while the supplier controls the communication channels
                 of the amanat to ensure that the amanat does not leak
                 information about the source code. We argue that the
                 protocol is both practically useful and mathematically
                 sound. As the protocol is based on well-known (and
                 relatively lightweight) cryptographic primitives, it
                 allows a straightforward implementation on top of
                 existing verification tool chains. To substantiate our
                 security claims, we establish the correctness of the
                 protocol by cryptographic reduction proofs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "15",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Rosenblum:2013:MDN,
  author =       "David S. Rosenblum",
  title =        "In memoriam: {David Notkin} (1955--2013)",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "16:1--16:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491510",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "16",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Xu:2013:PML,
  author =       "Guoqing Xu and Atanas Rountev",
  title =        "Precise memory leak detection for {Java} software
                 using container profiling",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "17:1--17:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491511",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/java2010.bib;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "A memory leak in a Java program occurs when object
                 references that are no longer needed are unnecessarily
                 maintained. Such leaks are difficult to detect because
                 static analysis typically cannot precisely identify
                 these redundant references, and existing dynamic leak
                 detection tools track and report fine-grained
                 information about individual objects, producing results
                 that are usually hard to interpret and lack precision.
                 In this article we introduce a novel container-based
                 heap-tracking technique, based on the fact that many
                 memory leaks in Java programs occur due to incorrect
                 uses of containers, leading to containers that keep
                 references to unused data entries. The novelty of the
                 described work is twofold: (1) instead of tracking
                 arbitrary objects and finding leaks by analyzing
                 references to unused objects, the technique tracks only
                 containers and directly identifies the source of the
                 leak, and (2) the technique computes a confidence value
                 for each container based on a combination of its memory
                 consumption and its elements' staleness (time since
                 last retrieval), while previous approaches do not
                 consider such combined metrics. Our experimental
                 results show that the reports generated by the proposed
                 technique can be very precise: for two bugs reported by
                 Sun, a known bug in SPECjbb 2000, and an example bug
                 from IBM developerWorks, the top containers in the
                 reports include the containers that leak memory.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "17",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
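
The confidence computation can be caricatured in a few lines of
Python; the product of footprint and mean staleness below is a
hypothetical combination, since the article's actual function is more
elaborate.

    import time

    def leak_confidence(containers, now=None):
        # containers: list of (name, bytes_held, last_retrieval_times).
        # Rank containers by memory footprint weighted by how stale
        # their elements are (time since last retrieval).
        now = time.time() if now is None else now
        scored = []
        for name, bytes_held, retrievals in containers:
            staleness = sum(now - t for t in retrievals) / len(retrievals)
            scored.append((bytes_held * staleness, name))
        return sorted(scored, reverse=True)

    print(leak_confidence(
        [("cache", 10_000_000, [90.0, 95.0]),   # recently used
         ("registry", 5_000_000, [1.0, 2.0])],  # long untouched
        now=100.0))
    # The smaller but staler "registry" outranks the busy "cache".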

@Article{Le:2013:MDF,
  author =       "Wei Le and Mary Lou Soffa",
  title =        "{Marple}: {Detecting} faults in path segments using
                 automatically generated analyses",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "18:1--18:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491512",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Generally, a fault is a property violation at a
                 program point along some execution path. To obtain the
                 path where a fault occurs, we can either run the
                 program or manually identify the execution paths
                 through code inspection. In both of the cases, only a
                 very limited number of execution paths can be examined
                 for a program. This article presents a static
                 framework, Marple, that automatically detects path
                 segments where a fault occurs at a whole program scale.
                 An important contribution of the work is the design of
                 a demand-driven analysis that effectively addresses
                 scalability challenges faced by traditional
                 path-sensitive fault detection. The techniques are made
                 general via a specification language and an algorithm
                 that automatically generates path-based analyses from
                 specifications. The generality is achieved in handling
                 both data- and control-centric faults as well as both
                 liveness and safety properties, enabling the
                 exploitation of fault interactions for diagnosis and
                 efficiency. Our experimental results demonstrate the
                 effectiveness of our techniques in detecting path
                 segments of buffer overflows, integer violations,
                 null-pointer dereferences, and memory leaks. Because we
                 applied an interprocedural, path-sensitive analysis,
                 our static fault detectors generally report better
                 precision than the tools available for comparison. Our
                 demand-driven analyses are shown scalable to deployed
                 applications such as apache, putty, and ffmpeg.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "18",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Yoo:2013:FLP,
  author =       "Shin Yoo and Mark Harman and David Clark",
  title =        "Fault localization prioritization: Comparing
                 information-theoretic and coverage-based approaches",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "19:1--19:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491513",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Test case prioritization techniques seek to maximize
                 early fault detection. Fault localization seeks to use
                 test cases already executed to help find the fault
                 location. There is a natural interplay between the two
                 techniques; once a fault is detected, we often switch
                 focus to fault fixing, for which localization may be a
                 first step. In this article we introduce the Fault
                 Localization Prioritization (FLP) problem, which
                 combines prioritization and localization. We evaluate
                 three techniques: a novel FLP technique based on
                 information theory, FLINT (Fault Localization using
                 INformation Theory), that we introduce in this article,
                 a standard Test Case Prioritization (TCP) technique,
                 and a ``test similarity technique'' used in previous
                 work. Our evaluation uses five different releases of
                 four software systems. The results indicate that FLP
                 and TCP can statistically significantly reduce fault
                 localization costs for 73\% and 76\% of cases,
                 respectively, and that FLINT significantly outperforms
                 similarity-based localization techniques in 52\% of the
                 cases considered in the study.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "19",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
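
FLINT's information-theoretic view can be summarized briefly:
normalized statement suspiciousness is treated as a probability
distribution over fault locations, and its Shannon entropy measures
how localized the fault is; prioritization then favors tests expected
to reduce that entropy.  A hypothetical Python sketch of the entropy
half:

    import math

    def locality_entropy(suspiciousness):
        # suspiciousness: mapping statement -> nonnegative score.
        total = sum(suspiciousness.values())
        if total == 0:
            return 0.0
        entropy = 0.0
        for s in suspiciousness.values():
            p = s / total
            if p > 0:
                entropy -= p * math.log2(p)
        return entropy

    print(locality_entropy({"s1": 1, "s2": 1, "s3": 1}))  # ~1.585, diffuse
    print(locality_entropy({"s1": 9, "s2": 1, "s3": 0}))  # ~0.469, localized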

@Article{Pradella:2013:BSC,
  author =       "Matteo Pradella and Angelo Morzenti and Pierluigi {San
                 Pietro}",
  title =        "Bounded satisfiability checking of metric temporal
                 logic specifications",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "20:1--20:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491514",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We introduce bounded satisfiability checking, a
                 verification technique that extends bounded model
                 checking by allowing also the analysis of a descriptive
                 model, consisting of temporal logic formulae, instead
                 of the more customary operational model, consisting of
                 a state transition system. We define techniques for
                 encoding temporal logic formulae into Boolean logic
                 that support the use of bi-infinite time domain and of
                 metric time operators. In the framework of bounded
                 satisfiability checking, we show how a descriptive
                 model can be refined into an operational one, and how
                 the correctness of such a refinement can be verified
                 for the bounded case, setting the stage for a stepwise
                 system development method based on a bounded model
                 refinement. Finally, we show how the adoption of a
                 modular approach can make the bounded refinement
                 process more manageable and efficient. All introduced
                 concepts are extensively applied to a set of case
                  studies, and thoroughly exercised with Zot, our
                  SAT-solver-based verification toolset.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "20",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
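
As a toy illustration of bounded satisfiability checking, the Python
sketch below searches for a finite trace satisfying a descriptive
formula.  Explicit enumeration stands in for the SAT encoding, and
the bi-infinite time domain and metric operators are omitted entirely.

    import itertools

    def bounded_sat(formula, atoms, k):
        # Search all Boolean traces of length k for one satisfying the
        # descriptive model `formula` (a predicate over traces).  A
        # real checker encodes this into SAT instead of enumerating.
        n = len(atoms)
        for bits in itertools.product([False, True], repeat=k * n):
            trace = [dict(zip(atoms, bits[i * n:(i + 1) * n]))
                     for i in range(k)]
            if formula(trace):
                return trace
        return None

    # "p holds at least once, and q holds whenever p does", bound 3.
    spec = lambda tr: (any(s["p"] for s in tr) and
                       all(s["q"] for s in tr if s["p"]))
    print(bounded_sat(spec, ["p", "q"], 3))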

@Article{Falessi:2013:VDR,
  author =       "Davide Falessi and Lionel C. Briand and Giovanni
                 Cantone and Rafael Capilla and Philippe Kruchten",
  title =        "The value of design rationale information",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "21:1--21:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491515",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "A complete and detailed (full) Design Rationale
                 Documentation (DRD) could support many software
                 development activities, such as an impact analysis or a
                 major redesign. However, this is typically too onerous
                 for systematic industrial use as it is not cost
                 effective to write, maintain, or read. The key idea
                 investigated in this article is that DRD should be
                 developed only to the extent required to support
                 activities particularly difficult to execute or in need
                 of significant improvement in a particular context. The
                 aim of this article is to empirically investigate the
                 customization of the DRD by documenting only the
                 information items that will probably be required for
                 executing an activity. This customization strategy
                 relies on the hypothesis that the value of a specific
                 DRD information item depends on its category (e.g.,
                 assumptions, related requirements, etc.) and on the
                 activity it is meant to support. We investigate this
                 hypothesis through two controlled experiments involving
                 a total of 75 master students as experimental subjects.
                 Results show that the value of a DRD information item
                 significantly depends on its category and, within a
                 given category, on the activity it supports.
                 Furthermore, on average among activities, documenting
                 only the information items that have been required at
                 least half of the time (i.e., the information that will
                 probably be required in the future) leads to a
                 customized DRD containing about half the information
                 items of a full documentation. We expect that such a
                 significant reduction in DRD information should
                 mitigate the effects of some inhibitors that currently
                 prevent practitioners from documenting design decision
                 rationale.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "21",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Hoffman:2013:TOM,
  author =       "Kevin Hoffman and Patrick Eugster",
  title =        "Trading obliviousness for modularity with cooperative
                 aspect-oriented programming",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "22:1--22:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491516",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "The potential of aspect-oriented programming to
                 adequately capture crosscutting concerns has yet to be
                 fully realized. For example, authors have detailed
                 significant challenges in creating reusable aspect
                 component libraries. One proposed solution is to
                 introduce Explicit Join Points (EJPs) to increase
                 modularity by reducing obliviousness, enabling a
                 Cooperative Aspect-Oriented Programming (Co-AOP)
                 methodology where base code and aspects synergistically
                 collaborate. This article explores the trade-offs
                 between obliviousness and modularity. We briefly
                 introduce EJPs and Co-AOP, and hypothesize how to
                 balance obliviousness and modularity using Co-AOP. We
                 build upon a prior empirical study to refactor three
                 real-life Java applications to implement the exception
                 handling concern using three distinct strategies: (1)
                 using fully oblivious aspects in AspectJ, (2) using
                 EJPs in a fully explicit fashion, and (3) using EJPs
                 while following the Co-AOP methodology. We study other
                 crosscutting concerns by refactoring a fourth
                 application, JHotDraw. The differences in terms of
                 common code metrics are analyzed, and the impact on
                 modularity is assessed using design structure matrices.
                 Results indicate that the Co-AOP methodology can in
                 many cases significantly improve code quality
                 attributes versus fully oblivious or fully explicit
                 approaches. We conclude with guiding principles on the
                 proper use of EJPs within the Co-AOP methodology.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "22",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Zhu:2013:ADP,
  author =       "Hong Zhu and Ian Bayley",
  title =        "An algebra of design patterns",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "23:1--23:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491517",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In a pattern-oriented software design process, design
                 decisions are made by selecting and instantiating
                 appropriate patterns, and composing them together. In
                 our previous work, we enabled these decisions to be
                 formalized by defining a set of operators on patterns
                 with which instantiations and compositions can be
                 represented. In this article, we investigate the
                 algebraic properties of these operators. We provide and
                 prove a complete set of algebraic laws so that
                 equivalence between pattern expressions can be proven.
                 Furthermore, we define an always-terminating
                  normalization of pattern expressions to a canonical form
                 which is unique modulo equivalence in first-order
                 logic. By a case study, the pattern-oriented design of
                 an extensible request-handling framework, we
                 demonstrate two practical applications of the algebraic
                 framework. First, we can prove the correctness of a
                 finished design with respect to the design decisions
                 made and the formal specification of the patterns.
                 Second, we can even derive the design from these
                 components.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "23",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Balaban:2013:FSU,
  author =       "Mira Balaban and Azzam Maraee",
  title =        "Finite satisfiability of {UML} class diagrams with
                 constrained class hierarchy",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "24:1--24:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491518",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Models lie at the heart of the emerging model-driven
                 engineering approach. In order to guarantee precise,
                 consistent, and correct models, there is a need for
                 efficient powerful methods for verifying model
                 correctness. Class diagram is the central language
                 within UML. Its correctness problems involve issues of
                 contradiction, namely the consistency problem, and
                 issues of finite instantiation, namely the finite
                 satisfiability problem. This article analyzes the
                 problem of finite satisfiability of class diagrams with
                 class hierarchy constraints and generalization-set
                 constraints. The article introduces the FiniteSat
                 algorithm for efficient detection of finite
                 satisfiability in such class diagrams, and analyzes its
                 limitations in terms of complex hierarchy structures.
                 FiniteSat is strengthened in two directions. First, an
                 algorithm for identification of the cause for a finite
                 satisfiability problem is introduced. Second, a method
                 for propagation of generalization-set constraints in a
                 class diagram is introduced. The propagation method
                 serves as a preprocessing step that improves FiniteSat
                 performance, and helps developers in clarifying
                 intended constraints. These algorithms are implemented
                 in the FiniteSatUSE tool [BGU Modeling Group 2011b], as
                 part of our ongoing effort for constructing a
                 model-level integrated development environment [BGU
                 Modeling Group 2010a].",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "24",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{DeCaso:2013:EBP,
  author =       "Guido {De Caso} and Victor Braberman and Diego
                 Garbervetsky and Sebastian Uchitel",
  title =        "Enabledness-based program abstractions for behavior
                 validation",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "25:1--25:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491519",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Code artifacts that have nontrivial requirements with
                 respect to the ordering in which their methods or
                 procedures ought to be called are common and appear,
                 for instance, in the form of API implementations and
                 objects. This work addresses the problem of validating
                 if API implementations provide their intended behavior
                 when descriptions of this behavior are informal,
                 partial, or nonexistent. The proposed approach
                 addresses this problem by generating abstract behavior
                 models which resemble typestates. These models are
                 statically computed and encode all admissible sequences
                 of method calls. The level of abstraction at which such
                  models are constructed has been shown to be useful for
                 validating code artifacts and identifying findings
                 which led to the discovery of bugs, adjustment of the
                 requirements expected by the engineer to the
                 requirements implicit in the code, and the improvement
                 of available documentation.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "25",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}
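
The notion of an enabledness-based abstraction can be illustrated
dynamically (the article computes its models statically): abstract
states are the sets of currently enabled methods, and transitions are
observed by replaying call sequences.  ToyFile below is a hypothetical
stand-in for an API implementation with ordering requirements.

    class ToyFile:
        # open() must precede read(); close() disables both.
        def __init__(self):
            self.is_open = False
        def enabled(self):
            return {"read", "close"} if self.is_open else {"open"}
        def call(self, m):
            if m == "open":
                self.is_open = True
            elif m == "close":
                self.is_open = False

    def state_after(make_api, seq):
        api = make_api()
        for m in seq:
            api.call(m)
        return frozenset(api.enabled())

    def explore(make_api, max_depth=4):
        # BFS over abstract states (enabledness sets), collecting the
        # transition relation of the abstract behavior model.
        transitions, init = set(), state_after(make_api, ())
        seen, frontier = {init}, [()]
        for _ in range(max_depth):
            next_frontier = []
            for seq in frontier:
                state = state_after(make_api, seq)
                for m in sorted(state):
                    target = state_after(make_api, seq + (m,))
                    transitions.add((state, m, target))
                    if target not in seen:
                        seen.add(target)
                        next_frontier.append(seq + (m,))
            frontier = next_frontier
        return transitions

    for t in sorted(explore(ToyFile), key=str):
        print(t)  # e.g. ({'open'}, 'open', {'read', 'close'})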

@Article{Marin:2013:UFS,
  author =       "Beatriz Mar{\'\i}n and Giovanni Giachetti and Oscar
                 Pastor and Tanja E. J. Vos and Alain Abran",
  title =        "Using a functional size measurement procedure to
                 evaluate the quality of models in {MDD} environments",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "26:1--26:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491520",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Models are key artifacts in Model-Driven Development
                 (MDD) methods. To produce high-quality software by
                 using MDD methods, quality assurance of models is of
                 paramount importance. To evaluate the quality of
                 models, defect detection is considered a suitable
                 approach and is usually applied using reading
                 techniques. However, these reading techniques have
                 limitations and constraints, and new techniques are
                 required to improve the efficiency at finding as many
                 defects as possible. This article presents a case study
                 that has been carried out to evaluate the use of a
                 Functional Size Measurement (FSM) procedure in the
                 detection of defects in models of an MDD environment.
                 To do this, we compare the defects and the defect types
                 found by an inspection group with the defects and the
                 defect types found by the FSM procedure. The results
                  indicate that the FSM procedure is useful since it
                  finds all the
                 defects related to a specific defect type, it finds
                 different defect types than an inspection group, and it
                 finds defects related to the correctness and the
                 consistency of the models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "26",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Sinnig:2013:UCT,
  author =       "Daniel Sinnig and Patrice Chalin and Ferhat Khendek",
  title =        "Use case and task models: an integrated development
                 methodology and its formal foundation",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "3",
  pages =        "27:1--27:??",
  month =        jul,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491509.2491521",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Jul 27 08:26:00 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "User Interface (UI) development methods are poorly
                 integrated with standard software engineering practice.
                 The differences in terms of artifacts involved,
                 development philosophies, and lifecycles can often
                 result in inconsistent system and UI specifications
                 leading to duplication of effort and increased
                 maintenance costs. To address such shortcomings, we
                 propose an integrated development methodology for use
                 case and task models. Use cases are generally used to
                 capture functional requirements whereas task models
                 specify the detailed user interactions with the UI. Our
                 methodology can assist practitioners in developing
                 software processes which allow these two kinds of
                 artifacts to be developed in a codependent and
                 integrated manner. We present our methodology, describe
                 its semantic foundations along with a set of formal
                 conformance relations, and introduce an automated
                 verification tool.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "27",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Denaro:2013:TAA,
  author =       "Giovanni Denaro and Mauro Pezz{\`e} and Davide Tosi",
  title =        "Test-and-adapt: an approach for improving service
                 interchangeability",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "28:1--28:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522921",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Service-oriented applications do not fully benefit
                 from standard APIs yet, and many applications fail to
                 use interchangeably all the services that implement a
                 standard service API. This article presents an approach
                 to develop adaptation strategies that improve service
                 interchangeability for service-oriented applications
                 based on standard APIs. In our approach, an adaptation
                 strategy consists of sets of parametric adaptation
                 plans (called test-and-adapt plans), which execute test
                 cases to reveal the occurrence of interchangeability
                 problems, and activate runtime adaptors according to
                 the test results. Throughout this article, we formalize
                 the structure of the parametric test-and-adapt plans
                 and of their execution semantics, present an algorithm
                 for identifying correct execution orders through sets
                 of test-and-adapt plans, provide empirical evidence of
                 the occurrence of interchangeability problems for
                 sample applications and services, and discuss the
                 effectiveness of the approach in terms of avoided
                 failures, runtime overheads and development costs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "28",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Martignoni:2013:MTC,
  author =       "Lorenzo Martignoni and Roberto Paleari and Alessandro
                 Reina and Giampaolo Fresi Roglia and Danilo Bruschi",
  title =        "A methodology for testing {CPU} emulators",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "29:1--29:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522922",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "A CPU emulator is a software system that simulates a
                 hardware CPU. Emulators are widely used by computer
                  scientists for various kinds of activities (e.g.,
                 debugging, profiling, and malware analysis). Although
                 no theoretical limitation prevents developing an
                 emulator that faithfully emulates a physical CPU,
                 writing a fully featured emulator is a very challenging
                  and error-prone task. Modern CISC architectures have a
                 very rich instruction set, some instructions lack
                 proper specifications, and others may have undefined
                 effects in corner cases. This article presents a
                 testing methodology specific for CPU emulators, based
                  on fuzzing. The emulator is ``stressed'' with specially
                  crafted test cases to verify whether the CPU is
                 properly emulated or not. Improper behaviors of the
                 emulator are detected by running the same test case
                 concurrently on the emulated and on the physical CPUs
                 and by comparing the state of the two after the
                  execution. Differences in the final state testify to
                  defects in the code of the emulator. We implemented
                  this methodology in a prototype (named EmuFuzzer),
                 analyzed five state-of-the-art IA-32 emulators (QEMU,
                 Valgrind, Pin, BOCHS, and JPC), and found several
                 defects in each of them, some of which can prevent
                 proper execution of programs.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "29",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Chang:2013:EHH,
  author =       "Herv{\'e} Chang and Leonardo Mariani and Mauro
                 Pezz{\`e}",
  title =        "Exception handlers for healing component-based
                 systems",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "30:1--30:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522923",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "To design effective exception handlers, developers
                 must predict at design time the exceptional events that
                 may occur at runtime, and must implement the
                 corresponding handlers on the basis of their
                 predictions. Designing exception handlers for
                 component-based software systems is particularly
                 difficult because the information required to build
                 handlers is distributed between component and
                  application developers. Component developers know the
                  internal details of the components but not the
                  applications, while application developers own the
                 applications but cannot access the details required to
                 implement handlers in components. This article
                 addresses the problem of automatically healing the
                  in-field failures that are caused by faulty integration
                 of OTS components. In the article, we propose a
                 technique and a methodology to decouple the tasks of
                 component and application developers, who will be able
                 to share information asynchronously and independently,
                 and communicate implicitly by developing and deploying
                 what we call healing connectors. Component developers
                 implement healing connectors on the basis of
                 information about the integration problems frequently
                 experienced by application developers. Application
                 developers easily and safely install healing connectors
                 in their applications without knowing the internal
                 details of the connectors. Healing connectors heal
                 failures activated by exceptions raised in the OTS
                 components actually deployed in the system. The article
                 defines healing connectors, introduces a methodology to
                 develop and deploy healing connectors, and presents
                 several case studies that indicate that healing
                 connectors are effective, reusable and efficient.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "30",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Xie:2013:TAR,
  author =       "Xiaoyuan Xie and Tsong Yueh Chen and Fei-Ching Kuo and
                 Baowen Xu",
  title =        "A theoretical analysis of the risk evaluation formulas
                 for spectrum-based fault localization",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "31:1--31:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522924",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "An important research area of Spectrum-Based Fault
                 Localization (SBFL) is the effectiveness of risk
                  evaluation formulas. Most previous studies have adopted
                  an empirical approach, which can hardly be considered
                  sufficiently comprehensive because of the huge number
                  of combinations of various factors in SBFL. Though
                  some studies have aimed at overcoming the limitations
                  of the empirical approach, none of them has provided a
                  completely satisfactory solution. Therefore, we provide
                 a theoretical investigation on the effectiveness of
                 risk evaluation formulas. We define two types of
                 relations between formulas, namely, equivalent and
                 better. To identify the relations between formulas, we
                 develop an innovative framework for the theoretical
                 investigation. Our framework is based on the concept
                 that the determinant for the effectiveness of a formula
                 is the number of statements with risk values higher
                 than the risk value of the faulty statement. We group
                 all program statements into three disjoint sets with
                 risk values higher than, equal to, and lower than the
                 risk value of the faulty statement, respectively. For
                 different formulas, the sizes of their sets are
                 compared using the notion of subset. We use this
                 framework to identify the maximal formulas which should
                 be the only formulas to be used in SBFL.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "31",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Qi:2013:PEB,
  author =       "Dawei Qi and Hoang D. T. Nguyen and Abhik
                 Roychoudhury",
  title =        "Path exploration based on symbolic output",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "32:1--32:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522925",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Efficient program path exploration is important for
                 many software engineering activities such as testing,
                 debugging, and verification. However, enumerating all
                 paths of a program is prohibitively expensive. In this
                 article, we develop a partitioning of program paths
                 based on the program output. Two program paths are
                 placed in the same partition if they derive the output
                 similarly, that is, the symbolic expression connecting
                 the output with the inputs is the same in both paths.
                 Our grouping of paths is gradually created by a smart
                 path exploration. Our experiments show the benefits of
                 the proposed path exploration in test-suite
                 construction. Our path partitioning produces a semantic
                  signature of a program, describing all the different
                 symbolic expressions that the output can assume along
                 different program paths. To reason about changes
                 between program versions, we can therefore analyze
                 their semantic signatures. In particular, we
                 demonstrate the applications of our path partitioning
                 in testing and debugging of software regressions.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "32",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Tateishi:2013:PIS,
  author =       "Takaaki Tateishi and Marco Pistoia and Omer Tripp",
  title =        "Path- and index-sensitive string analysis based on
                 monadic second-order logic",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "33:1--33:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522926",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/string-matching.bib;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We propose a novel technique for statically verifying
                 the strings generated by a program. The verification is
                 conducted by encoding the program in Monadic
                 Second-order Logic (M2L). We use M2L to describe
                 constraints among program variables and to abstract
                 built-in string operations. Once we encode a program in
                 M2L, a theorem prover for M2L, such as MONA, can
                 automatically check if a string generated by the
                 program satisfies a given specification, and if not,
                 exhibit a counterexample. With this approach, we can
                 naturally encode relationships among strings,
                 accounting also for cases in which a program
                 manipulates strings using indices. In addition, our
                 string analysis is path sensitive in that it accounts
                 for the effects of string and Boolean comparisons, as
                 well as regular-expression matches. We have implemented
                 our string analysis algorithm, and used it to augment
                 an industrial security analysis for Web applications by
                 automatically detecting and verifying sanitizers ---
                 methods that eliminate malicious patterns from
                 untrusted strings, making these strings safe to use in
                 security-sensitive operations. On the 8 benchmarks we
                 analyzed, our string analyzer discovered 128 previously
                 unknown sanitizers, compared to 71 sanitizers detected
                 by a previously presented string analysis.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "33",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Vouillon:2013:SCC,
  author =       "J{\'e}r{\^o}me Vouillon and Roberto {Di Cosmo}",
  title =        "On software component co-installability",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "34:1--34:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522927",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Modern software systems are built by composing
                  components drawn from large repositories, whose size
                  and complexity are increasing at a very fast pace. A
                 fundamental challenge for the maintainability and the
                 scalability of such software systems is the ability to
                 quickly identify the components that can or cannot be
                 installed together: this is the co-installability
                  problem, which is related to Boolean satisfiability and
                 is known to be algorithmically hard. This article
                  develops a novel theoretical framework, based on
                  formally certified semantics-preserving graph-theoretic
                  transformations, that allows us to associate with each
                  concrete component repository a much smaller one with a
                  simpler structure, which we call strongly flat, with
                  equivalent co-installability properties. This flat
                 repository can be displayed in a way that provides a
                 concise view of the co-installability issues in the
                 original repository, or used as a basis for various
                 algorithms related to co-installability, like the
                 efficient computation of strong conflicts between
                 components. The proofs contained in this work have been
                 machine checked using the Coq proof assistant.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "34",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Minku:2013:SEE,
  author =       "Leandro L. Minku and Xin Yao",
  title =        "Software effort estimation as a multiobjective
                 learning problem",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "35:1--35:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522928",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Ensembles of learning machines are promising for
                 software effort estimation (SEE), but need to be
                 tailored for this task to have their potential
                 exploited. A key issue when creating ensembles is to
                  produce diverse and accurate base models. If different
                  performance measures behave differently for SEE, they
                  could be used as a natural way of creating SEE
                  ensembles. We propose to view SEE model
                 creation as a multiobjective learning problem. A
                 multiobjective evolutionary algorithm (MOEA) is used to
                 better understand the tradeoff among different
                 performance measures by creating SEE models through the
                 simultaneous optimisation of these measures. We show
                  that the performance measures behave very differently,
                  sometimes even presenting opposite trends. They are
                 then used as a source of diversity for creating SEE
                 ensembles. A good tradeoff among different measures can
                 be obtained by using an ensemble of MOEA solutions.
                  This ensemble performs similarly to or better than a model
                 that does not consider these measures explicitly.
                  Moreover, MOEA is flexible, allowing a particular
                  measure to be emphasised if desired. In conclusion,
                  MOEA can be used to better understand the relationship
                  among performance measures and has been shown to be
                  very effective in creating SEE models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "35",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Lizcano:2013:WCA,
  author =       "David Lizcano and Fernando Alonso and Javier Soriano
                 and Genoveva Lopez",
  title =        "A web-centred approach to end-user software
                 engineering",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "36:1--36:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522929",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "This article addresses one of the major end-user
                 software engineering (EUSE) challenges, namely, how to
                 motivate end users to apply unfamiliar software
                 engineering techniques and activities to achieve their
                 goal: translate requirements into software that meets
                 their needs. EUSE activities are secondary to the goal
                 that the program is helping to achieve and end-user
                 programming is opportunistic. The challenge is then to
                 find ways to incorporate EUSE activities into the
                 existing workflow without users having to make
                 substantial changes to the type of work they do or
                 their priorities. In this article, we set out an
                 approach to EUSE for web-based applications. We also
                 propose a software lifecycle that is consistent with
                 the conditions and priorities of end users without
                 programming skills and is well-aligned with EUSE's
                  characteristic informality, ambiguity, and
                  opportunism. Users applying this lifecycle manage
                 to find solutions that they would otherwise be unable
                 to identify. They also develop quality products. Users
                 of this approach will not have to be acquainted with
                 software engineering, as a framework will take them
                 through the web-centred EUSE lifecycle step-by-step. We
                 also report a statistical experiment in which users
                 develop web software with and without a framework to
                 guide them through the lifecycle. Its aim is to
                 validate the applicability of our framework-driven
                 lifecycle.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "36",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Mcmillan:2013:PSR,
  author =       "Collin McMillan and Denys Poshyvanyk and Mark
                 Grechanik and Qing Xie and Chen Fu",
  title =        "{Portfolio}: searching for relevant functions and
                 their usages in millions of lines of code",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "37:1--37:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522930",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/pagerank.bib;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Different studies show that programmers are more
                  interested in finding definitions of functions and
                  their uses than those of variables, statements, or
                  ordinary code fragments. Therefore, developers require
                  support in
                 finding relevant functions and determining how these
                 functions are used. Unfortunately, existing code search
                 engines do not provide enough of this support to
                 developers, thus reducing the effectiveness of code
                 reuse. We provide this support to programmers in a code
                 search system called Portfolio that retrieves and
                 visualizes relevant functions and their usages. We have
                 built Portfolio using a combination of models that
                 address surfing behavior of programmers and sharing
                 related concepts among functions. We conducted two
                 experiments: first, an experiment with 49 C/C++
                 programmers to compare Portfolio to Google Code Search
                 and Koders using a standard methodology for evaluating
                 information-retrieval-based engines; and second, an
                 experiment with 19 Java programmers to compare
                 Portfolio to Koders. The results show with strong
                 statistical significance that users find more relevant
                 functions with higher precision with Portfolio than
                 with Google Code Search and Koders. We also show that
                 by using PageRank, Portfolio is able to rank returned
                 relevant functions more efficiently.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "37",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
  keywords =     "PageRank algorithm",
}

@Article{Wursch:2013:EQF,
  author =       "Michael W{\"u}rsch and Emanuel Giger and Harald C.
                 Gall",
  title =        "Evaluating a query framework for software evolution
                 data",
  journal =      j-TOSEM,
  volume =       "22",
  number =       "4",
  pages =        "38:1--38:??",
  month =        oct,
  year =         "2013",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2522920.2522931",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Oct 30 12:18:03 MDT 2013",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "With the steady advances in tooling to support
                 software engineering, mastering all the features of
                 modern IDEs, version control systems, and project
                 trackers is becoming increasingly difficult. Answering
                 even the most common developer questions can be
                 surprisingly tedious and difficult. In this article we
                 present a user study with 35 subjects to evaluate our
                 quasi-natural language interface that provides access
                 to various facets of the evolution of a software system
                 but requires almost zero learning effort. Our approach
                 is tightly woven into the Eclipse IDE and allows
                 developers to answer questions related to source code,
                 development history, or bug and issue management. The
                 results of our evaluation show that our query interface
                 can outperform classical software engineering tools in
                 terms of correctness, while yielding significant time
                 savings to its users and greatly advancing the state of
                 the art in terms of usability and learnability.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "38",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Rosenblum:2014:Ea,
  author =       "David S. Rosenblum",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "1:1--1:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559939",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "1",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Haesevoets:2014:ACS,
  author =       "Robrecht Haesevoets and Danny Weyns and Tom Holvoet",
  title =        "Architecture-centric support for adaptive service
                 collaborations",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "2:1--2:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559937",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In today's volatile business environments,
                 collaboration between information systems, both within
                 and across company borders, has become essential to
                 success. An efficient supply chain, for example,
                 requires the collaboration of distributed and
                 heterogeneous systems of multiple companies. Developing
                 such collaborative applications and building the
                 supporting information systems poses several
                 engineering challenges. A key challenge is to manage
                 the ever-growing design complexity. In this article, we
                 argue that software architecture should play a more
                 prominent role in the development of collaborative
                 applications. This can help to better manage design
                 complexity by modularizing collaborations and
                 separating concerns. State-of-the-art solutions,
                 however, often lack proper abstractions for modeling
                 collaborations at architectural level or do not reify
                 these abstractions at detailed design and
                 implementation level. Developers, on the other hand,
                 rely on middleware, business process management, and
                 Web services, techniques that mainly focus on low-level
                 infrastructure. To address the problem of managing the
                 design complexity of collaborative applications, we
                 present Macodo. Macodo consists of three complementary
                 parts: (1) a set of abstractions for modeling adaptive
                 collaborations, (2) a set of architectural views, the
                 main contribution of this article, that reify these
                 abstractions at architectural level, and (3) a
                 proof-of-concept middleware infrastructure that
                 supports the architectural abstractions at design and
                 implementation level. We evaluate the architectural
                 views in a controlled experiment. Results show that the
                 use of Macodo can reduce fault density and design
                 complexity, and improve reuse and productivity. The
                 main contributions of this article are illustrated in a
                 supply chain management case.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "2",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Tappenden:2014:ACC,
  author =       "Andrew F. Tappenden and James Miller",
  title =        "Automated cookie collection testing",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "3:1--3:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559936",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Cookies are used by over 80\% of Web applications
                 utilizing dynamic Web application frameworks.
                 Applications deploying cookies must be rigorously
                 verified to ensure that the application is robust and
                 secure. Given the intense time-to-market pressures
                 faced by modern Web applications, testing strategies
                 that are low cost and automatable are required.
                 Automated Cookie Collection Testing (CCT) is presented,
                 and is empirically demonstrated to be a low-cost and
                 highly effective automated testing solution for modern
                 Web applications. Automatable test oracles and
                 evaluation metrics specifically designed for Web
                 applications are presented, and are shown to be
                 significant diagnostic tests. Automated CCT is shown to
                 detect faults within five real-world Web applications.
                  A case study of over 580 test results for a single
                  application is presented, demonstrating that automated
                 CCT is an effective testing strategy. Moreover, CCT is
                 found to detect security bugs in a Web application
                 released into full production.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "3",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Bavota:2014:ISM,
  author =       "Gabriele Bavota and Malcom Gethers and Rocco Oliveto
                 and Denys Poshyvanyk and Andrea de Lucia",
  title =        "Improving software modularization via automated
                 analysis of latent topics and dependencies",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "4:1--4:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559935",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Oftentimes, during software maintenance the original
                 program modularization decays, thus reducing its
                 quality. One of the main reasons for such architectural
                 erosion is suboptimal placement of source-code classes
                 in software packages. To alleviate this issue, we
                 propose an automated approach to help developers
                 improve the quality of software modularization. Our
                 approach analyzes underlying latent topics in source
                 code as well as structural dependencies to recommend
                 (and explain) refactoring operations aiming at moving a
                 class to a more suitable package. The topics are
                 acquired via Relational Topic Models (RTM), a
                  probabilistic topic modeling technique. The resulting
                  tool, coined R$^3$ (Rational Refactoring via RTM), has
                 been evaluated in two empirical studies. The results of
                 the first study conducted on nine software systems
                  indicate that R$^3$ provides a coupling reduction from
                 10\% to 30\% among the software modules. The second
                  study with 62 developers confirms that R$^3$ is able to
                 provide meaningful recommendations (and explanations)
                 for move class refactoring. Specifically, more than
                 70\% of the recommendations were considered meaningful
                 from a functional point of view.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "4",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Liu:2014:DWN,
  author =       "Xiao Liu and Yun Yang and Dong Yuan and Jinjun Chen",
  title =        "Do we need to handle every temporal violation in
                 scientific workflow systems?",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "5:1--5:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559938",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Scientific processes are usually time constrained with
                 overall deadlines and local milestones. In scientific
                 workflow systems, due to the dynamic nature of the
                 underlying computing infrastructures such as grid and
                 cloud, execution delays often take place and result in
                 a large number of temporal violations. Since temporal
                 violation handling is expensive in terms of both
                 monetary costs and time overheads, an essential
                  question raised is ``do we need to handle every
                 temporal violation in scientific workflow systems?''
                  The answer would be ``true'' according to existing
                  works on workflow temporal management, which adopt a
                  philosophy similar to the handling of functional
                  exceptions, that is, every temporal violation should be
                 handled whenever it is detected. However, based on our
                 observation, the phenomenon of self-recovery where
                 execution delays can be automatically compensated for
                 by the saved execution time of subsequent workflow
                 activities has been entirely overlooked. Therefore,
                 considering the nonfunctional nature of temporal
                 violations, our answer is ``not necessarily true.'' To
                 take advantage of self-recovery, this article proposes
                 a novel adaptive temporal violation handling point
                 selection strategy where this phenomenon is effectively
                 utilised to avoid unnecessary temporal violation
                 handling. Based on simulations of both real-world
                 scientific workflows and randomly generated test cases,
                 the experimental results demonstrate that our strategy
                  can significantly reduce the cost of temporal violation
                  handling by over 96\% while maintaining an extremely
                  low violation rate under normal circumstances.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "5",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Mari:2014:MBS,
  author =       "Federico Mari and Igor Melatti and Ivano Salvo and
                 Enrico Tronci",
  title =        "Model-based synthesis of control software from
                 system-level formal specifications",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "6:1--6:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559934",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Many embedded systems are indeed software-based
                 control systems, that is, control systems whose
                 controller consists of control software running on a
                 microcontroller device. This motivates investigation on
                 formal model-based design approaches for automatic
                 synthesis of embedded systems control software. We
                 present an algorithm, along with a tool QKS
                  implementing it, that from a formal model (as a
                  discrete-time linear hybrid system) of the controlled
                  system (plant), implementation specifications (that
                  is, the number of bits in the Analog-to-Digital (AD)
                  conversion), and system-level formal specifications
                  (that is, safety and liveness requirements for the
                  closed-loop system) returns correct-by-construction
                 control software that has a Worst-Case Execution Time
                 (WCET) linear in the number of AD bits and meets the
                 given specifications. We show feasibility of our
                 approach by presenting experimental results on using it
                 to synthesize control software for a buck DC-DC
                 converter, a widely used mixed-mode analog circuit, and
                 for the inverted pendulum.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "6",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Bodden:2014:JPI,
  author =       "Eric Bodden and {\'E}ric Tanter and Milton Inostroza",
  title =        "Join point interfaces for safe and flexible decoupling
                 of aspects",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "7:1--7:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559933",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "In current aspect-oriented systems, aspects usually
                 carry, through their pointcuts, explicit references to
                 the base code. Those references are fragile and hinder
                 important software engineering properties such as
                 modular reasoning and independent evolution of aspects
                 and base code. In this work, we introduce a novel
                 abstraction called Join Point Interface, which, by
                 design, aids modular reasoning and independent
                 evolution by decoupling aspects from base code and by
                 providing a modular type-checking algorithm. Join point
                 interfaces can be used both with implicit announcement
                 through pointcuts, and with explicit announcement,
                 using closure join points. Join point interfaces
                 further offer polymorphic dispatch on join points, with
                 an advice-dispatch semantics akin to multimethods. To
                 support flexible join point matching, we incorporate
                 into our language an earlier proposal for generic
                 advice, and introduce a mechanism for controlled global
                 quantification. We motivate each language feature in
                 detail, showing that it is necessary to obtain a
                 language design that is both type safe and flexible
                 enough to support typical aspect-oriented programming
                 idioms. We have implemented join point interfaces as an
                 open-source extension to AspectJ. A case study on
                 existing aspect-oriented programs supports our design,
                 and in particular shows the necessity of both generic
                 interfaces and some mechanism for global
                 quantification.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "7",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Masri:2014:PCC,
  author =       "Wes Masri and Rawad Abou Assi",
  title =        "Prevalence of coincidental correctness and mitigation
                 of its impact on fault localization",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "8:1--8:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559932",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Researchers have argued that for a failure to be
                  observed the following three conditions must be met: $
                 C_R $ = the defect was reached; $ C_I $ = the program
                 has transitioned into an infectious state; and $ C_P $
                 = the infection has propagated to the output.
                 Coincidental Correctness (CC) arises when the program
                 produces the correct output while condition $ C_R $ is
                 met but not $ C_P $. We recognize two forms of
                  coincidental correctness, weak and strong. In weak CC,
                  $ C_R $ is met while $ C_I $ might or might not be
                  met; in strong CC, both $ C_R $ and $ C_I $ are
                 met. In this work we first show that CC is prevalent in
                 both of its forms and demonstrate that it is a safety
                 reducing factor for Coverage-Based Fault Localization
                 (CBFL). We then propose two techniques for cleansing
                 test suites from coincidental correctness to enhance
                 CBFL, given that the test cases have already been
                 classified as failing or passing. We evaluated the
                 effectiveness of our techniques by empirically
                 quantifying their accuracy in identifying weak CC
                  tests. The results were promising; for example, the
                  better-performing technique, using 105 test suites and
                  statement coverage, exhibited 9\% false negatives, 30\%
                  false positives, and neither false negatives nor false
                 positives in 14.3\% of the test suites. Also using 73
                 test suites and more complex coverage, the numbers were
                 12\%, 19\%, and 15\%, respectively.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "8",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Briand:2014:TSD,
  author =       "Lionel Briand and Davide Falessi and Shiva Nejati and
                 Mehrdad Sabetzadeh and Tao Yue",
  title =        "Traceability and {SysML} design slices to support
                 safety inspections: a controlled experiment",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "9:1--9:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2559978",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Certifying safety-critical software and ensuring its
                 safety requires checking the conformance between safety
                 requirements and design. Increasingly, the development
                 of safety-critical software relies on modeling, and the
                 System Modeling Language (SysML) is now commonly used
                 in many industry sectors. Inspecting safety conformance
                 by comparing design models against safety requirements
                 requires safety inspectors to browse through large
                  models and is consequently time-consuming and
                 error-prone. To address this, we have devised a
                 mechanism to establish traceability between
                 (functional) safety requirements and SysML design
                 models to extract design slices (model fragments) that
                 filter out irrelevant details but keep enough context
                 information for the slices to be easy to inspect and
                 understand. In this article, we report on a controlled
                 experiment assessing the impact of the traceability and
                 slicing mechanism on inspectors' conformance decisions
                 and effort. Results show a significant decrease in
                 effort and an increase in decisions' correctness and
                 level of certainty.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "9",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Zhou:2014:DSP,
  author =       "Yuming Zhou and Baowen Xu and Hareton Leung and Lin
                 Chen",
  title =        "An in-depth study of the potentially confounding
                 effect of class size in fault prediction",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "1",
  pages =        "10:1--10:??",
  month =        feb,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2556777",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Sat Feb 15 11:14:44 MST 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Background. The extent of the potentially confounding
                 effect of class size in the fault prediction context is
                 not clear, nor is the method to remove the potentially
                 confounding effect, or the influence of this removal on
                 the performance of fault-proneness prediction models.
                 Objective. We aim to provide an in-depth understanding
                 of the effect of class size on the true associations
                 between object-oriented metrics and fault-proneness.
                 Method. We first employ statistical methods to examine
                 the extent of the potentially confounding effect of
                 class size in the fault prediction context. After that,
                 we propose a linear regression-based method to remove
                 the potentially confounding effect. Finally, we
                 empirically investigate whether this removal could
                 improve the prediction performance of fault-proneness
                 prediction models. Results. Based on open-source
                 software systems, we found: (a) the confounding effect
                 of class size on the associations between
                 object-oriented metrics and fault-proneness in general
                 exists; (b) the proposed linear regression-based method
                 can effectively remove the confounding effect; and (c)
                 after removing the confounding effect, the prediction
                 performance of fault prediction models with respect to
                 both ranking and classification can in general be
                 significantly improved. Conclusion. We should remove
                 the confounding effect of class size when building
                 fault prediction models.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "10",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Rosenblum:2014:Eb,
  author =       "David S. Rosenblum",
  title =        "Editorial",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "2",
  pages =        "11:1--11:??",
  month =        mar,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2581373",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 2 16:21:37 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "11",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Pan:2014:GTG,
  author =       "Kai Pan and Xintao Wu and Tao Xie",
  title =        "Guided test generation for database applications via
                 synthesized database interactions",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "2",
  pages =        "12:1--12:??",
  month =        mar,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491529",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 2 16:21:37 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Testing database applications typically requires the
                 generation of tests consisting of both program inputs
                 and database states. Recently, a testing technique
                 called Dynamic Symbolic Execution (DSE) has been
                 proposed to reduce manual effort in test generation for
                 software applications. However, applying DSE to
                 generate tests for database applications faces various
                 technical challenges. For example, the database
                 application under test needs to physically connect to
                 the associated database, which may not be available for
                 various reasons. The program inputs whose values are
                 used to form the executed queries are not treated
                 symbolically, posing difficulties for generating valid
                 database states or appropriate database states for
                 achieving high coverage of query-result-manipulation
                 code. To address these challenges, in this article, we
                 propose an approach called SynDB that synthesizes new
                 database interactions to replace the original ones from
                 the database application under test. In this way, we
                 bridge various constraints within a database
                 application: query-construction constraints, query
                 constraints, database schema constraints, and
                 query-result-manipulation constraints. We then apply a
                 state-of-the-art DSE engine called Pex for .NET from
                 Microsoft Research to generate both program inputs and
                 database states. The evaluation results show that tests
                 generated by our approach can achieve higher code
                 coverage than existing test generation approaches for
                 database applications.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "12",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Scanniello:2014:IUA,
  author =       "Giuseppe Scanniello and Carmine Gravino and Marcela
                 Genero and Jos{\'e} A. Cruz-Lemus and Genoveffa Tortora",
  title =        "On the impact of {UML} analysis models on source-code
                 comprehensibility and modifiability",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "2",
  pages =        "13:1--13:??",
  month =        mar,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2491912",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 2 16:21:37 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "We carried out a family of experiments to investigate
                 whether the use of UML models produced in the
                 requirements analysis process helps in the
                 comprehensibility and modifiability of source code. The
                 family consists of a controlled experiment and three
                 external replications carried out with students and
                 professionals from Italy and Spain; 86 participants
                 with different abilities and levels of experience with
                 UML took part. The results of the experiments were
                 integrated through the use of meta-analysis. The
                 results of both the individual experiments and
                 meta-analysis indicate that UML models produced in the
                 requirements analysis process influence neither the
                 comprehensibility of source code nor its
                 modifiability.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "13",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Fritz:2014:DKM,
  author =       "Thomas Fritz and Gail C. Murphy and Emerson
                 Murphy-Hill and Jingwen Ou and Emily Hill",
  title =        "Degree-of-knowledge: Modeling a developer's knowledge
                 of code",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "2",
  pages =        "14:1--14:??",
  month =        mar,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2512207",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 2 16:21:37 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "As a software system evolves, the system's codebase
                 constantly changes, making it difficult for developers
                 to answer such questions as who is knowledgeable about
                 particular parts of the code or who needs to know about
                 changes made. In this article, we show that an
                 externalized model of a developer's individual
                 knowledge of code can make it easier for developers to
                 answer such questions. We introduce a
                 degree-of-knowledge model that computes automatically,
                 for each source-code element in a codebase, a real
                 value that represents a developer's knowledge of that
                 element based on a developer's authorship and
                 interaction data. We present evidence that both
                 authorship and interaction data are important in
                 characterizing a developer's knowledge of code. We
                 report on the usage of our model in case studies on
                 expert finding, knowledge transfer, and identifying
                 changes of interest. We show that our model improves
                 upon an existing expertise-finding approach and can
                 accurately identify changes of which a developer
                 should likely be aware. We discuss how our model may
                 provide a starting point for knowledge transfer, but
                 note that more refinement is needed. Finally,
                 we discuss the robustness of the model across multiple
                 development sites.",
  acknowledgement = ack-nhfb,
  ajournal =     "ACM Trans. Softw. Eng. Methodol.",
  articleno =    "14",
  fjournal =     "ACM Transactions on Software Engineering and
                 Methodology",
  journal-URL =  "https://dl.acm.org/loi/tosem",
}

@Article{Lu:2014:RBS,
  author =       "Lunjin Lu and Dae-Kyoo Kim",
  title =        "Required behavior of sequence diagrams: Semantics and
                 conformance",
  journal =      j-TOSEM,
  volume =       "23",
  number =       "2",
  pages =        "15:1--15:??",
  month =        mar,
  year =         "2014",
  CODEN =        "ATSMER",
  DOI =          "https://doi.org/10.1145/2523108",
  ISSN =         "1049-331X (print), 1557-7392 (electronic)",
  ISSN-L =       "1049-331X",
  bibdate =      "Wed Apr 2 16:21:37 MDT 2014",
  bibsource =    "http://www.acm.org/pubs/contents/journals/tosem/;
                 https://www.math.utah.edu/pub/tex/bib/tosem.bib",
  abstract =     "Many reusable software artifacts such as design
                 patterns and design aspects make use of UML sequence
                 diagrams to describe interaction behaviors. When a
                 pattern or an aspect is reused in an application, it is
                 important to ensure that the sequence diagrams for the
                 application conform to the corresponding sequence
                 diagrams for the pattern or aspect. Reasoning about
                 the conformance relationship between sequence diagrams
                 has not been addressed adequately in the literature.
                 In this article, we focus on required behaviors
                 specified by a UML sequence diagram and provide a
                 semantics-based formalization of conformance
                 relationships between sequence diagrams. A novel trace
                 semantics is first given that precisely captures
                 required behaviors. A
                 refinement relation between sequence diagrams is then
                 defined based on the semantics. The refinement relation
                 allows a sequence diagram to be refined by changing its
                 structure