%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "2.60",
%%%     date            = "21 May 2014",
%%%     time            = "19:00:45 MDT",
%%%     filename        = "tods.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "http://www.math.utah.edu/~beebe",
%%%     checksum        = "43640 42720 226078 2273299",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography, BibTeX, database systems,
%%%                        TODS",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a COMPLETE BibTeX bibliography for
%%%                        ACM Transactions on Database Systems (TODS)
%%%                        (CODEN ATDSD3, ISSN 0362-5915 (print),
%%%                        1557-4644 (electronic)), which began
%%%                        publishing in March 1976.
%%%
%%%                        The companion bibliography pods.bib covers
%%%                        the ACM SIGACT-SIGMOD Symposia on
%%%                        Principles of Database Systems, and the
%%%                        companion bibliography vldb.bib covers the
%%%                        International Conferences on Very Large
%%%                        Data Bases.  The companion bibliography
%%%                        sigmod.bib covers the ACM Special Interest
%%%                        Group on Management of Data SIGMOD Record
%%%                        newsletter.
%%%
%%%                        The journal has a World Wide Web site at
%%%
%%%                            http://www.acm.org/tods/
%%%                            http://www.acm.org/pubs/contents/journals/tods/
%%%                            http://portal.acm.org/browse_dl.cfm?idx=J777
%%%
%%%                        At version 2.60, the year coverage looked
%%%                        like this:
%%%
%%%                             1975 (   1)    1989 (  23)    2003 (  15)
%%%                             1976 (  20)    1990 (  22)    2004 (  22)
%%%                             1977 (  23)    1991 (  24)    2005 (  29)
%%%                             1978 (  21)    1992 (  20)    2006 (  38)
%%%                             1979 (  27)    1993 (  20)    2007 (  30)
%%%                             1980 (  25)    1994 (  17)    2008 (  31)
%%%                             1981 (  30)    1995 (  13)    2009 (  25)
%%%                             1982 (  31)    1996 (  14)    2010 (  29)
%%%                             1983 (  30)    1997 (  14)    2011 (  27)
%%%                             1984 (  32)    1998 (  14)    2012 (  32)
%%%                             1985 (  26)    1999 (  13)    2013 (  28)
%%%                             1986 (  25)    2000 (  12)    2014 (  17)
%%%                             1987 (  25)    2001 (  12)
%%%                             1988 (  18)    2002 (  11)
%%%                             19xx (   2)
%%%
%%%                             Article:        879
%%%                             Book:             3
%%%                             InProceedings:    1
%%%                             Proceedings:      5
%%%
%%%                             Total entries:  888
%%%
%%%                        This bibliography was initially built from
%%%                        searches in the OCLC Content1st database.
%%%                        Additions were then made from all of the
%%%                        bibliographies in the TeX User Group
%%%                        collection, from bibliographies in the
%%%                        author's personal files, from the IEEE
%%%                        INSPEC CD-ROM database (1989--1995), from
%%%                        the Compendex database, from the American
%%%                        Mathematical Society MathSciNet database,
%%%                        and from the computer science bibliography
%%%                        collection on ftp.ira.uka.de in
%%%                        /pub/bibliography to which many people
%%%                        have contributed.  The snapshot of this
%%%                        collection was taken on 5-May-1994, and it
%%%                        consists of 441 BibTeX files, 2,672,675
%%%                        lines, 205,289 entries, and 6,375
%%%                        <at>String{} abbreviations, occupying
%%%                        94.8MB of disk space.  Missing data in many
%%%                        entries were supplied after consulting
%%%                        original journal issues.
%%%
%%%                        Numerous errors in the sources noted above
%%%                        have been corrected.  Spelling has been
%%%                        verified with the UNIX spell and GNU ispell
%%%                        programs using the exception dictionary
%%%                        stored in the companion file with extension
%%%                        .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen as
%%%                        name:year:abbrev, where name is the family
%%%                        name of the first author or editor, year is a
%%%                        4-digit number, and abbrev is a 3-letter
%%%                        condensation of important title words.
%%%                        Citation labels were automatically generated
%%%                        by software developed for the BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, with the help of
%%%                        ``bibsort -byvolume''.  The bibsort utility
%%%                        is available from ftp.math.utah.edu in
%%%                        /pub/tex/bib.
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================

@Preamble{
    "\hyphenation{ }"
}

%%% ====================================================================
%%% Acknowledgement abbreviations:

@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|http://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:

@String{j-BIT                   = "BIT"}

@String{j-CACM                  = "Communications of the ACM"}

@String{j-TODS                  = "ACM Transactions on Database Systems"}

%%% ====================================================================
%%% Publishers and their addresses:

@String{pub-ACM                 = "ACM Press"}

@String{pub-ACM:adr             = "New York, NY 10036, USA"}

@String{pub-IEEE                = "IEEE Computer Society Press"}

@String{pub-IEEE:adr            = "1109 Spring Street, Suite 300, Silver
                                   Spring, MD 20910, USA"}

@String{pub-MORGAN-KAUFMANN     = "Morgan Kaufmann Publishers"}

@String{pub-MORGAN-KAUFMANN:adr = "Los Altos, CA 94022, USA"}

%%% ====================================================================
%%% Bibliography entries:

@Article{Yao:1977:ABA,
  author =       "S. B. Yao",
  title =        "Approximating Block Accesses in Database
                 Organization",
  journal =      j-CACM,
  volume =       "20",
  number =       "4",
  pages =        "260--261",
  month =        apr,
  year =         "1977",
  CODEN =        "CACMA2",
  ISSN =         "0001-0782 (print), 1557-7317 (electronic)",
  bibdate =      "Tue Sep 20 23:14:33 1994",
  bibsource =    "ftp://ftp.ira.uka.de/pub/bibliography/Database/Graefe.bib;
                 ftp://ftp.ira.uka.de/pub/bibliography/Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in \cite{Yao:1977:ABM}.",
  fjournal =     "Communications of the ACM",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J79",
  keywords =     "selectivity estimation I/O cost query optimization
                 CACM",
}

@Article{Hsiao:1976:ATD,
  author =       "David K. Hsiao",
  title =        "{ACM Transactions on Database Systems}: aim and
                 scope",
  journal =      j-TODS,
  volume =       "1",
  number =       "1",
  pages =        "1--2",
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-1/p1-hsiao/p1-hsiao.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-1/p1-hsiao/",
  abstract =     "Record-keeping and decision-making in industry and
                 government are increasingly based on data stored in
                 computer processable databases. Thus the need for
                 improved computer technology for building, managing,
                 and using these databases is clearly evident. This need
                 is particularly acute in a complex society where the
                 interrelationships among various aspects of the society
                 must be identified and represented. The data which must
                 be used to represent these relationships are growing
                 more complex in nature and becoming greater in size.
                 Furthermore, the increasing on-line use of computer
                 systems and the proliferation and mass introduction of
                 multilevel secondary storage suggests that future
                 computer systems will be primarily oriented toward
                 database management. The large size of future on-line
                 databases will require the computer system to manage
                 logical as well as physical resources. The management of
                 logical resources is concerned with the organization,
                 access, update, storage, and sharing of the data and
                 programs in the database. In addition, the sharing of
                 data means that the database system must be capable of
                 providing privacy protection and of controlling access
                 to the users' data. The term {\em data\/} is
                 interpreted broadly to include textual, numeric, and
                 signal data as well as data found in structured
                 records.\par

                 The aim of {\em ACM Transactions on Database Systems\/}
                 (TODS) is to serve as a focal point for an integrated
                 dissemination of database research and development on
                 storage and processor hardware, system software,
                 applications, information science, information
                 analysis, and file management. These areas are
                 particularly relevant to the following ACM Special
                 Interest Groups: Business Data Processing (SIGBDP),
                 Information Retrieval (SIGIR), and Management of Data
                 (SIGMOD). TODS will also embrace parts of the
                 Management/Database Systems and the Information
                 Retrieval and Language Processing sections of {\em
                 Communications of the ACM}.\par

                 High quality papers on all aspects of computer database
                 systems will be published in TODS. The scope of TODS
                 emphasizes data structures; storage organization; data
                 collection and dissemination; search and retrieval
                 strategies; update strategies; access control
                 techniques; data integrity; security and protection;
                 design and implementation of database software;
                 database related languages including data description
                 languages, query languages, and procedural and
                 nonprocedural data manipulation languages; language
                 processing; analysis and classification of data;
                 database utilities; data translation techniques;
                 distributed database problems and techniques; database
                 recovery and restart; database restructuring; adaptive
                 data structures; concurrent access techniques; database
                 computer hardware architecture; performance and
                 evaluation; intelligent front ends; and related
                 subjects such as privacy and economic issues.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4)",
}

@Article{Chen:1976:ERM,
  author =       "Peter Pin-Shan S. Chen",
  title =        "The Entity-Relationship Model: Toward a Unified View
                 of Data",
  journal =      j-TODS,
  volume =       "1",
  number =       "1",
  pages =        "9--36",
  month =        mar,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compiler/prog.lang.theory.bib; Database/Graefe.bib;
                 Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib; Object/Nierstrasz.bib",
  note =         "Reprinted in \cite{Stonebraker:1988:RDS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-1/p9-chen/p9-chen.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-1/p9-chen/",
  abstract =     "A data model, called the entity-relationship model, is
                 proposed. This model incorporates some of the important
                 semantic information about the real world. A special
                 diagrammatic technique is introduced as a tool for
                 database design. An example of database design and
                 description using the model and the diagrammatic
                 technique is given. Some implications for data
                 integrity, information retrieval, and data manipulation
                 are discussed.\par

                 The entity-relationship model can be used as a basis
                 for unification of different views of data: the network
                 model, the relational model, and the entity set model.
                 Semantic ambiguities in these models are analyzed.
                 Possible ways to derive their views of data from the
                 entity-relationship model are presented.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "Data Base Task Group; data definition and
                 manipulation; data integrity and consistency; data
                 models; database design; dblit; entity set model;
                 entity-relationship; entity-relationship model; logical
                 view of data; network model; relational model;
                 semantics of data; TODS",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Bayer:1976:EST,
  author =       "R. Bayer and J. K. Metzger",
  title =        "On the Encipherment of Search Trees and Random Access
                 Files",
  journal =      j-TODS,
  volume =       "1",
  number =       "1",
  pages =        "37--52",
  month =        mar,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in \cite[p.~508--510]{Kerr:1975:PIC}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-1/p37-bayer/p37-bayer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-1/p37-bayer/",
  abstract =     "The securing of information in indexed, random access
                 files by means of privacy transformations must be
                 considered as a problem distinct from that for
                 sequential files. Not only must processing overhead due
                 to encrypting be considered, but also threats to
                 encipherment arising from updating and the file
                 structure itself must be countered. A general
                 encipherment scheme is proposed for files maintained in
                 a paged structure in secondary storage. This is applied
                 to the encipherment of indexes organized as $B$-trees;
                 a $B$-tree is a particular type of multiway search
                 tree. Threats to the encipherment of $B$-trees,
                 especially relating to updating, are examined, and
                 countermeasures are proposed for each. In addition, the
                 effect of encipherment on file access and update, on
                 paging mechanisms, and on files related to the
                 enciphered index are discussed. Many of the concepts
                 presented may be readily transferred to other forms of
                 multiway index trees and to binary search trees.",
  acknowledgement = ack-nhfb,
  annote =       "Trees versus hashing as his 1974 IFIP paper?",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "",
  subject =      "Software --- Operating Systems --- Security and
                 Protection (D.4.6): {\bf Access controls}; Software ---
                 Operating Systems --- Security and Protection (D.4.6):
                 {\bf Cryptographic controls}",
}

@Article{Lin:1976:DRA,
  author =       "Chyuan Shiun Lin and Diane C. P. Smith and John Miles
                 Smith",
  title =        "The design of a rotating associative memory for
                 relational database applications",
  journal =      j-TODS,
  volume =       "1",
  number =       "1",
  pages =        "53--65",
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-1/p53-lin/p53-lin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-1/p53-lin/",
  abstract =     "The design and motivation for a rotating associative
                 relational store (RARES) is described. RARES is
                 designed to enhance the performance of an optimizing
                 relational query interface by supporting important high
                 level optimization techniques. In particular, it can
                 perform tuple selection operations at the storage
                 device and also can provide a mechanism for efficient
                 sorting. Like other designs for rotating associative
                 stores, RARES contains search logic which is attached
                 to the heads of a rotating head-per-track storage
                 device. RARES is distinct from other designs in that it
                 utilizes a novel ``orthogonal'' storage layout. This
                 layout allows a high output rate of selected tuples
                 even when a sort order in the stored relation must be
                 preserved. As in certain other designs, RARES can
                 usually output a tuple as soon as it is found to
                 satisfy the selection criteria. However, relative to
                 these designs, the orthogonal layout allows an order of
                 magnitude reduction in the capacity of storage local to
                 the search logic.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative memory; content addressability; data
                 organization; head-per-track disks; memory systems;
                 relational database; rotating devices; search logic;
                 sorting technique",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Mahmoud:1976:OAR,
  author =       "Samy Mahmoud and J. S. Riordon",
  title =        "Optimal Allocation of Resources in Distributed
                 Information Networks",
  journal =      j-TODS,
  volume =       "1",
  number =       "1",
  pages =        "66--78",
  month =        mar,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-1/p66-mahmoud/p66-mahmoud.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-1/p66-mahmoud/",
  abstract =     "The problems of file allocation and capacity
                 assignment in a fixed topology distributed computer
                 network are examined. These two aspects of the design
                 are tightly coupled by means of an average message
                 delay constraint. The objective is to allocate copies
                 of information files to network nodes and capacities to
                 network links so that a minimum cost is achieved
                 subject to network delay and file availability
                 constraints. A model for solving the problem is
                 formulated and the resulting optimization problem is
                 shown to fall into a class of nonlinear integer
                 programming problems. Deterministic techniques for
                 solving this class of problems are computationally
                 cumbersome, even for small size problems. A new
                 heuristic algorithm is developed, which is based on a
                 decomposition technique that greatly reduces the
                 computational complexity of the problem. Numerical
                 results for a variety of network configurations
                 indicate that the heuristic algorithm, while not
                 theoretically convergent, yields practicable low cost
                 solutions with substantial savings in computer
                 processing time and storage requirements. Moreover, it
                 is shown that this algorithm is capable of solving
                 realistic network problems whose solutions using
                 deterministic techniques are computationally
                 intractable.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data files; distributed computing; information
                 networks; link capacities; resource sharing",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2)",
}

@Article{Stemple:1976:DMF,
  author =       "David W. Stemple",
  title =        "A Database Management Facility for Automatic
                 Generation of Database Managers",
  journal =      j-TODS,
  volume =       "1",
  number =       "1",
  pages =        "79--94",
  month =        mar,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in \cite[p.~252]{Kerr:1975:PIC}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-1/p79-stemple/p79-stemple.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-1/p79-stemple/",
  abstract =     "A facility is described for the implementation of
                 database management systems having high degrees of {\em
                 horizontal\/} data independence, i.e. independence from
                 chosen logical properties of a database as opposed to
                 {\em vertical\/} independence from storage structures.
                 The facility consists of a high level language for the
                 specification of virtual database managers, a compiler
                 from this language to a pseudomachine language, and an
                 interpreter for the pseudomachine language.\par

                 It is shown how this facility can be used to produce
                 efficient database management systems with any degree
                 of both horizontal and vertical data independence. Two
                 key features of this tool are the compilation of
                 tailored database managers from individual schemas and
                 multiple levels of optional binding.",
  acknowledgement = ack-nhfb,
  annote =       "Describes SLUSH and SLIM, a proposed compiler and
                 interpreter to operate on network schemas with
                 adjustable binding times.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data independence; database management systems",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management (H.2); Software --- Operating Systems ---
                 Systems Programs and Utilities (D.4.9): {\bf make}",
}

@Article{Astrahan:1976:SRR,
  author =       "M. M. Astrahan and M. W. Blasgen and D. D. Chamberlin
                 and K. P. Eswaran and J. N. Gray and P. P. Griffiths
                 and W. F. King and R. A. Lorie and P. R. McJones and J.
                 W. Mehl and G. R. Putzolu and I. L. Traiger and B. W.
                 Wade and V. Watson",
  title =        "{System R}: a Relational Approach to Database
                 Management",
  journal =      j-TODS,
  volume =       "1",
  number =       "2",
  pages =        "97--137",
  month =        jun,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Object/Nierstrasz.bib",
  note =         "Also published in/as: IBM, San Jose, Research Report.
                 No. RJ-1738, Feb. 1976. Reprinted in
                 \cite{Stonebraker:1988:RDS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-2/p97-astrahan/p97-astrahan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-2/p97-astrahan/",
  abstract =     "System R is a database management system which
                 provides a high level relational data interface. The
                 systems provides a high level of data independence by
                 isolating the end user as much as possible from
                 underlying storage structures. The system permits
                 definition of a variety of relational views on common
                 underlying data. Data control features are provided,
                 including authorization, integrity assertions,
                 triggered transactions, a logging and recovery
                 subsystem, and facilities for maintaining data
                 consistency in a shared-update environment.\par

                 This paper contains a description of the overall
                 architecture and design of the system. At the present
                 time the system is being implemented and the design
                 evaluated. We emphasize that System R is a vehicle for
                 research in database architecture, and is not planned
                 as a product.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "authorization; data structures; database; dblit; index
                 structures; locking; nonprocedural language; recovery;
                 relational model; TODS relation database IBM San Jose",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf System R}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Relational databases}; Information Systems --- Database
                 Management (H.2)",
}

@Article{Navathe:1976:RLD,
  author =       "Shamkant B. Navathe and James P. Fry",
  title =        "Restructuring for Large Data Bases: Three Levels of
                 Abstraction",
  journal =      j-TODS,
  volume =       "1",
  number =       "2",
  pages =        "138--158",
  month =        jun,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in \cite[p.~174]{Kerr:1975:PIC}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-2/p138-navathe/p138-navathe.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-2/p138-navathe/",
  abstract =     "The development of a powerful restructuring function
                 involves two important components--the unambiguous
                 specification of the restructuring operations and the
                 realization of these operations in a software system.
                 This paper is directed to the first component in the
                 belief that a precise specification will provide a firm
                 foundation for the development of restructuring
                 algorithms and, subsequently, their implementation. The
                 paper completely defines the semantics of the
                 restructuring of tree structured databases.\par

                 The delineation of the restructuring function is
                 accomplished by formulating three different levels of
                 abstraction, with each level of abstraction
                 representing successively more detailed semantics of
                 the function.\par

                 At the first level of abstraction, the schema
                 modification, three types are identified--naming,
                 combining, and relating; these three types are further
                 divided into eight schema operations. The second level
                 of abstraction, the instance operations, constitutes
                 the transformations on the data instances; they are
                 divided into group operations such as replication,
                 factoring, union, etc., and group relation operations
                 such as collapsing, refinement, fusion, etc. The final
                 level, the item value operations, includes the actual
                 item operations, such as copy value, delete value, or
                 create a null value.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data definition; data translation; database; database
                 management systems; logical restructuring",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Database Management ---
                 Heterogeneous Databases (H.2.5): {\bf Data
                 translation**}",
}

@Article{Yao:1976:DDR,
  author =       "S. B. Yao and K. S. Das and T. J. Teorey",
  title =        "A Dynamic Database Reorganization Algorithm",
  journal =      j-TODS,
  volume =       "1",
  number =       "2",
  pages =        "159--174",
  month =        jun,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: Purdue Un., TR-168, Nov. 1975.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-2/p159-yao/p159-yao.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-2/p159-yao/",
  abstract =     "Reorganization is necessary in some databases for
                 overcoming the performance deterioration caused by
                 updates. The paper presents a dynamic reorganization
                 algorithm which makes the reorganization decision by
                 measuring the database search costs. Previously, the
                 reorganization intervals could only be determined for
                 linear deterioration and known database lifetime. It is
                 shown that the dynamic reorganization algorithm is near
                 optimum for constant reorganization cost and is
                 superior for increasing reorganization cost. In
                 addition, it can be applied to cases of unknown
                 database lifetime and nonlinear performance
                 deterioration. The simplicity, generality, and
                 efficiency appear to make this a good heuristic for
                 database reorganization.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database; file organization; information retrieval;
                 reorganization",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Retrieval models}",
}

@Article{Burkhard:1976:HTA,
  author =       "Walter A. Burkhard",
  title =        "Hashing and Trie Algorithms for Partial-Match
                 Retrieval",
  journal =      j-TODS,
  volume =       "1",
  number =       "2",
  pages =        "175--187",
  month =        jun,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; Graphics/siggraph/76.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: UCSD, Appl. Physics and Inf. Sc,
                 CS TR.2, Jun. 1975.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-2/p175-burkhard/p175-burkhard.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-2/p175-burkhard/",
  abstract =     "File designs suitable for retrieval from a file of
                 $k$-letter words when queries may be only partially
                 specified are examined. A new class of partial match
                 file designs (called PMF designs) based upon hash
                 coding and trie search algorithms which provide good
                 worst-case performance is introduced. Upper bounds on
                 the worst-case performance of these designs are given
                 along with examples of files achieving the bound. Other
                 instances of PMF designs are known to have better
                 worst-case performances. The implementation of the file
                 designs with associated retrieval algorithms is
                 considered. The amount of storage required is
                 essentially that required of the records themselves.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; analysis; associative retrieval; hash
                 coding; partial match; retrieval; searching; trie
                 search",
  oldlabel =     "geom-96",
  subject =      "Mathematics of Computing --- Mathematical Software
                 (G.4): {\bf Algorithm design and analysis}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf
                 Retrieval models}",
}

@Article{Stonebraker:1976:DII,
  author =       "Michael Stonebraker and Eugene Wong and Peter Kreps
                 and Gerald Held",
  title =        "The Design and Implementation of {INGRES}",
  journal =      j-TODS,
  volume =       "1",
  number =       "3",
  pages =        "189--222",
  month =        sep,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  note =         "Reprinted in \cite{Stonebraker:1988:RDS}. Also
                 published in/as: UCB, Elec. Res. Lab, Memo No.
                 ERL-M577, Jan. 1976.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-3/p189-stonebraker/p189-stonebraker.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-3/p189-stonebraker/",
  abstract =     "The currently operational (March 1976) version of the
                 INGRES database management system is described. This
                 multiuser system gives a relational view of data,
                 supports two high level nonprocedural data
                 sublanguages, and runs as a collection of user
                 processes on top of the UNIX operating system for
                 Digital Equipment Corporation PDP 11/40, 11/45, and
                 11/70 computers. Emphasis is on the design decisions
                 and tradeoffs related to (1) structuring the system
                 into processes, (2) embedding one command language in a
                 general purpose programming language, (3) the
                 algorithms implemented to process interactions, (4) the
                 access methods implemented, (5) the concurrency and
                 recovery control currently provided, and (6) the data
                 structures used for system catalogs and the role of the
                 database administrator.\par

                 Also discussed are (1) support for integrity
                 constraints (which is only partly operational), (2) the
                 not yet supported features concerning views and
                 protection, and (3) future plans concerning the
                 system.",
  acknowledgement = ack-nhfb,
  annote =       "Describes implementation of INGRES, a non-distributed
                 relational database system. This paper is useful for
                 understanding the distributed INGRES paper.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency; data integrity; data organization; data
                 sublanguage; database optimization; nonprocedural
                 language; protection; QUEL EQUEL query modification
                 process structure Halloween problem TODS; query
                 decomposition; query language; relational database",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management ---
                 Languages (H.2.3); Information Systems --- Database
                 Management --- General (H.2.0): {\bf Security,
                 integrity, and protection**}",
}

@Article{Wong:1976:DSQ,
  author =       "Eugene Wong and Karel Youssefi",
  title =        "Decomposition --- {A} Strategy for Query Processing",
  journal =      j-TODS,
  volume =       "1",
  number =       "3",
  pages =        "223--241",
  month =        sep,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: UCB, Elec. Res. Lab, Memo No.
                 ERL-574, Jan. 1976;",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-3/p223-wong/p223-wong.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-3/p223-wong/",
  abstract =     "Strategy for processing multivariable queries in the
                 database management system INGRES is considered. The
                 general procedure is to decompose the query into a
                 sequence of one-variable queries by alternating between
                 (a) reduction: breaking off components of the query
                 which are joined to it by a single variable, and (b)
                 tuple substitution: substituting for one of the
                 variables a tuple at a time. Algorithms for reduction
                 and for choosing the variable to be substituted are
                 given. In most cases the latter decision depends on
                 estimation of costs; heuristic procedures for making
                 such estimates are outlined.",
  acknowledgement = ack-nhfb,
  annote =       "INGRES query decomposition by reduction to single
                 variable queries, and tuple substitution --- choosing a
                 variable and substituting for it from all tuples, generating a
                 family of queries in one fewer variable.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "connected query; decomposition; detachment; Ingres
                 TODS; irreducible query; joining (overlapping)
                 variable; query processing; relational database; tuple
                 substitution; variable selection",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}",
}

@Article{Griffiths:1976:AMR,
  author =       "Patricia P. Griffiths and Bradford W. Wade",
  title =        "An Authorization Mechanism for a Relational Database
                 System",
  journal =      j-TODS,
  volume =       "1",
  number =       "3",
  pages =        "242--255",
  month =        sep,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-3/p242-griffiths/p242-griffiths.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-3/p242-griffiths/",
  abstract =     "A multiuser database system must selectively permit
                 users to share data, while retaining the ability to
                 restrict data access. There must be a mechanism to
                 provide protection and security, permitting information
                 to be accessed only by properly authorized users.
                 Further, when tables or restricted views of tables are
                 created and destroyed dynamically, the granting,
                 authentication, and revocation of authorization to use
                 them must also be dynamic. Each of these issues and
                 their solutions in the context of the relational
                 database management system System R are discussed.
                 \par

                 When a database user creates a table, he is fully and
                 solely authorized to perform upon it actions such as
                 read, insert, update, and delete. He may explicitly
                 grant to any other user any or all of his privileges on
                 the table. In addition he may specify that that user is
                 authorized to further grant these privileges to still
                 other users. The result is a directed graph of granted
                 privileges originating from the table creator.\par

                 At some later time a user A may revoke some or all of
                 the privileges which he previously granted to another
                 user B. This action usually revokes the entire subgraph
                 of the grants originating from A's grant to B. It may
                 be, however, that B will still possess the revoked
                 privileges by means of a grant from another user C, and
                 therefore some or all of B's grants should not be
                 revoked. This problem is discussed in detail, and an
                 algorithm for detecting exactly which of B's grants
                 should be revoked is presented.",
  acknowledgement = ack-nhfb,
  annote =       "Defines a dynamic authorization mechanism. A database
                 user can grant or revoke privileges (such as to read,
                 insert, or delete) on a file that he has created.
                 Furthermore, he can authorize others to grant these
                 same privileges. The database management system keeps
                 track of a directed graph, emanating from the creator
                 of granted privileges.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access control; authorization; data dependent
                 authorization; database systems; privacy; protection in
                 databases; revocation of authorization; security",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4); Information Systems --- Database Management
                 --- General (H.2.0): {\bf Security, integrity, and
                 protection**}",
}

@Article{Severance:1976:DFT,
  author =       "Dennis G. Severance and Guy M. Lohman",
  title =        "Differential Files: Their Application to the
                 Maintenance of Large Databases",
  journal =      j-TODS,
  volume =       "1",
  number =       "3",
  pages =        "256--267",
  month =        sep,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-3/p256-severance/p256-severance.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-3/p256-severance/",
  abstract =     "The representation of a collection of data in terms of
                 its differences from some preestablished point of
                 reference is a basic storage compaction technique which
                 finds wide applicability. This paper describes a
                 differential database representation which is shown to
                 be an efficient method for storing large and volatile
                 databases. The technique confines database
                 modifications to a relatively small area of physical
                 storage and as a result offers two significant
                 operational advantages. First, because the ``reference
                 point'' for the database is inherently static, it can
                 be simply and efficiently stored. Second, since all
                 modifications to the database are physically localized,
                 the process of backup and the process of recovery are
                 relatively fast and inexpensive.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "backup and recovery; data sharing; database
                 maintenance; differential files",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Shneiderman:1976:BSS,
  author =       "Ben Shneiderman and Victor Goodman",
  title =        "Batched Searching of Sequential and Tree Structured
                 Files",
  journal =      j-TODS,
  volume =       "1",
  number =       "3",
  pages =        "268--275",
  month =        sep,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See comments in \cite{Piwowarski:1985:CBS}. Also
                 published in/as: Indiana Un., CSD Tech. Ref. 0132.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-3/p268-shneiderman/p268-shneiderman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-3/p268-shneiderman/",
  abstract =     "The technique of batching searches has been ignored in
                 the context of disk based online data retrieval
                 systems. This paper suggests that batching be
                 reconsidered for such systems since the potential
                 reduction in processor demand may actually reduce
                 response time. An analysis with sample numerical
                 results and algorithms is presented.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Information
                 Storage and Retrieval (H.3)",
}

@Article{Bernstein:1976:STN,
  author =       "Philip A. Bernstein",
  title =        "Synthesizing Third Normal Form Relations from
                 Functional Dependencies",
  journal =      j-TODS,
  volume =       "1",
  number =       "4",
  pages =        "277--298",
  month =        dec,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-4/p277-bernstein/p277-bernstein.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-4/p277-bernstein/",
  abstract =     "It has been proposed that the description of a
                 relational database can be formulated as a set of
                 functional relationships among database attributes.
                 These functional relationships can then be used to
                 synthesize algorithmically a relational scheme. It is
                 the purpose of this paper to present an effective
                 procedure for performing such a synthesis. The schema
                 that results from this procedure is proved to be in
                 Codd's third normal form and to contain the fewest
                 possible number of relations. Problems with earlier
                 attempts to construct such a procedure are also
                 discussed.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database schema; functional dependency; relational
                 model; semantics of data; third normal form",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Normal forms}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Schema and subschema}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}",
}

@Article{Liu:1976:APS,
  author =       "Jane W. S. Liu",
  title =        "Algorithms for parsing search queries in systems with
                 inverted file organization",
  journal =      j-TODS,
  volume =       "1",
  number =       "4",
  pages =        "299--316",
  month =        dec,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-4/p299-liu/p299-liu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-4/p299-liu/",
  abstract =     "In an inverted file system a query is in the form of a
                 Boolean expression of index terms. In response to a
                 query the system accesses the inverted lists
                 corresponding to the index terms, merges them, and
                 selects from the merged list those records that satisfy
                 the search logic. Considered in this paper is the
                 problem of determining a Boolean expression which leads
                 to the minimum total merge time among all Boolean
                 expressions that are equivalent to the expression given
                 in the query. This problem is the same as finding an
                 optimal merge tree among all trees that realize the
                 truth function determined by the Boolean expression in
                 the query. Several algorithms are described which
                 generate optimal merge trees when the sizes of overlaps
                 between different lists are small compared with the
                 length of the lists.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "inverted file systems; merge algorithms; parsing
                 Boolean queries",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Mathematics of Computing ---
                 Mathematical Software (G.4): {\bf Algorithm design and
                 analysis}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Query processing}",
}

@Article{Sherman:1976:PDM,
  author =       "Stephen W. Sherman and Richard S. Brice",
  title =        "Performance of a Database Manager in a Virtual Memory
                 System",
  journal =      j-TODS,
  volume =       "1",
  number =       "4",
  pages =        "317--343",
  month =        dec,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-4/p317-sherman/p317-sherman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-4/p317-sherman/",
  abstract =     "Buffer space is created and managed in database
                 systems in order to reduce accesses to the I/O devices
                 for database information. In systems using virtual
                 memory any increase in the buffer space may be
                 accompanied by an increase in paging. The effects of
                 these factors on system performance are quantified
                 where system performance is a function of page faults
                 and database accesses to I/O devices. This phenomenon
                 is examined through the analysis of empirical data
                 gathered in a multifactor experiment. The factors
                 considered are memory size, size of buffer space,
                 memory replacement algorithm, and buffer management
                 algorithm. The improvement of system performance
                 through an increase in the size of the buffer space is
                 demonstrated. It is also shown that for certain values
                 of the other factors an increase in the size of the
                 buffer space can cause performance to deteriorate.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "buffer manager; Buffer operating system support TODS;
                 database management; double paging; page faults; page
                 replacement algorithm; performance; virtual buffer;
                 virtual memory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Database Manager}; Mathematics of
                 Computing --- Mathematical Software (G.4): {\bf
                 Algorithm design and analysis}; Computer Systems
                 Organization --- Performance of Systems (C.4)",
}

@Article{Donovan:1976:DSA,
  author =       "John J. Donovan",
  title =        "Database System Approach to Management Decision
                 Support",
  journal =      j-TODS,
  volume =       "1",
  number =       "4",
  pages =        "344--369",
  month =        dec,
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-4/p344-donovan/p344-donovan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-4/p344-donovan/",
  abstract =     "Traditional intuitive methods of decision-making are
                 no longer adequate to deal with the complex problems
                 faced by the modern policymaker. Thus systems must be
                 developed to provide the information and analysis
                 necessary for the decisions which must be made. These
                 systems are called decision support systems. Although
                 database systems provide a key ingredient to decision
                 support systems, the problems now facing the
                 policymaker are different from those problems to which
                 database systems have been applied in the past. The
                 problems are usually not known in advance, they are
                 constantly changing, and answers are needed quickly.
                 Hence additional technologies, methodologies, and
                 approaches must expand the traditional areas of
                 database and operating systems research (as well as
                 other software and hardware research) in order for them
                 to become truly effective in supporting policymakers.
                 \par

                 This paper describes recent work in this area and
                 indicates where future work is needed. Specifically the
                 paper discusses: (1) why there exists a vital need for
                 decision support systems; (2) examples from work in the
                 field of energy which make explicit the characteristics
                 which distinguish these decision support systems from
                 traditional operational and managerial systems; (3) how
                 an awareness of decision support systems has evolved,
                 including a brief review of work done by others and a
                 statement of the computational needs of decision
                 support systems which are consistent with contemporary
                 technology; (4) an approach which has been made to meet
                 many of these computational needs through the
                 development and implementation of a computational
                 facility, the Generalized Management Information System
                 (GMIS); and (5) the application of this computational
                 facility to a complex and important energy problem
                 facing New England in a typical study within the New
                 England Energy Management Information System (NEEMIS)
                 Project.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems; decision support systems; management
                 applications; modeling; networking; relational; virtual
                 machines",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Database Management --- Systems
                 (H.2.4)",
}

@Article{McGee:1976:UCD,
  author =       "William C. McGee",
  title =        "On user criteria for data model evaluation",
  journal =      j-TODS,
  volume =       "1",
  number =       "4",
  pages =        "370--387",
  year =         "1976",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1976-1-4/p370-mcgee/p370-mcgee.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1976-1-4/p370-mcgee/",
  abstract =     "The emergence of a database technology in recent years
                 has focused interest on the subject of data models. A
                 data model is the class of logical data structures
                 which a computer system or language makes available to
                 the user for the purpose of formulating data processing
                 applications. The diversity of computer systems and
                 languages has resulted in a corresponding diversity of
                 data models, and has created a problem for the user in
                 selecting a data model which is in some sense
                 appropriate to a given application. An evaluation
                 procedure is needed which will allow the user to
                 evaluate alternative models in the context of a
                 specific set of applications. This paper takes a first
                 step toward such a procedure by identifying the
                 attributes of a data model which can be used as
                 criteria for evaluating the model. Two kinds of
                 criteria are presented: use criteria, which measure the
                 usability of the model; and implementation criteria,
                 which measure the implementability of the model and the
                 efficiency of the resulting implementation. The use of
                 the criteria is illustrated by applying them to three
                 specific models: an $n$-ary relational model, a
                 hierarchic model, and a network model.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data model; data model evaluation; data model
                 selection; hierarchic model; network model; relational
                 model",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}",
}

@Article{Kam:1977:MSD,
  author =       "John B. Kam and Jeffrey D. Ullman",
  title =        "A Model of Statistical Databases and Their Security",
  journal =      j-TODS,
  volume =       "2",
  number =       "1",
  pages =        "1--10",
  month =        mar,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-1/p1-kam/p1-kam.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-1/p1-kam/",
  abstract =     "Considered here, for a particular model of databases
                 in which only information about relatively large sets
                 of records can be obtained, is the question of whether
                 one can from statistical information obtain information
                 about individuals. Under the assumption that the data
                 in the database is taken from arbitrary integers, it is
                 shown that essentially nothing can be inferred. It is
                 also shown that when the values are known to be
                 imprecise in some fixed range, one can often deduce the
                 values of individual records.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compromisability; data security; linear independence;
                 statistical database; vector space",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}; Information Systems --- Database Management
                 --- General (H.2.0): {\bf Security, integrity, and
                 protection**}",
}

@Article{Bayer:1977:PBT,
  author =       "Rudolf Bayer and Karl Unterauer",
  title =        "Prefix {B}-trees",
  journal =      j-TODS,
  volume =       "2",
  number =       "1",
  pages =        "11--26",
  month =        mar,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: IBM Yorktown, Technical Report
                 RJ1796, Jun. 1976.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-1/p11-bayer/p11-bayer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-1/p11-bayer/",
  abstract =     "Two modifications of $B$-trees are described, simple
                 prefix $B$-trees and prefix $B$-trees. Both store only
                 parts of keys, namely prefixes, in the index part of a
                 $B$*-tree. In simple prefix $B$-trees those prefixes
                 are selected carefully to minimize their length. In
                 prefix $B$-trees the prefixes need not be fully stored,
                 but are reconstructed as the tree is searched. Prefix
                 $B$-trees are designed to combine some of the
                 advantages of $B$-trees, digital search trees, and key
                 compression techniques while reducing the processing
                 overhead of compression techniques.",
  acknowledgement = ack-nhfb,
  annote =       "Index Btree structures can easily be compressed.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "truncation compression TODS",
  subject =      "Data --- Data Structures (E.1): {\bf Trees}",
}

@Article{Schkolnick:1977:CAH,
  author =       "Mario Schkolnick",
  title =        "A Clustering Algorithm for Hierarchical Structures",
  journal =      j-TODS,
  volume =       "2",
  number =       "1",
  pages =        "27--44",
  month =        mar,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 09:36:45 1996",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  annote =       "Optimal file partitioning, applied to IMS.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
}

@Article{Yao:1977:ABM,
  author =       "S. B. Yao",
  title =        "An Attribute Based Model for Database Access Cost
                 Analysis",
  journal =      j-TODS,
  volume =       "2",
  number =       "1",
  pages =        "45--67",
  month =        mar,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in \cite{Yao:1977:ABA}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-1/p45-yao/p45-yao.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-1/p45-yao/",
  abstract =     "A generalized model for physical database
                 organizations is presented. Existing database
                 organizations are shown to fit easily into the model as
                 special cases. Generalized access algorithms and cost
                 equations associated with the model are developed and
                 analyzed. The model provides a general design framework
                 in which the distinguishing properties of database
                 organizations are made explicit and their performances
                 can be compared.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "B-tree; database model; database organization;
                 database performance; estimation approximation TODS;
                 evaluation; index organization; index sequential;
                 inverted file; multilist",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1); Information Systems ---
                 Information Storage and Retrieval --- Content Analysis
                 and Indexing (H.3.1); Data --- Data Structures (E.1):
                 {\bf Trees}",
}

@Article{Anderson:1977:MCS,
  author =       "Henry D. Anderson and P. Bruce Berra",
  title =        "Minimum Cost Selection of Secondary Indexes for
                 Formatted Files",
  journal =      j-TODS,
  volume =       "2",
  number =       "1",
  pages =        "68--90",
  month =        mar,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-1/p68-anderson/p68-anderson.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-1/p68-anderson/",
  abstract =     "Secondary indexes are often used in database
                 management systems for secondary key retrieval.
                 Although their use can improve retrieval time
                 significantly, the cost of index maintenance and
                 storage increases the overhead of the file processing
                 application. The optimal set of indexed secondary keys
                 for a particular application depends on a number of
                 application dependent factors. In this paper a cost
                 function is developed for the evaluation of candidate
                 indexing choices and applied to the optimization of
                 index selection. Factors accounted for include file
                 size, the relative rates of retrieval and maintenance
                 and the distribution of retrieval and maintenance over
                 the candidate keys, index structure, and system
                 charging rates. Among the results demonstrated are the
                 increased effectiveness of secondary indexes for large
                 files, the effect of the relative rates of retrieval
                 and maintenance, the greater cost of allowing for
                 arbitrarily formulated queries, and the impact on cost
                 of the use of different index structures.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access methods; access path; Boolean query; cost
                 function; data management; database; file design; file
                 organization; inverted file; inverted index;
                 maintenance; optimization; retrieval; secondary index;
                 secondary key; secondary key access",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Content Analysis and Indexing (H.3.1):
                 {\bf Indexing methods}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Storage (H.3.2): {\bf File organization}; Information
                 Systems --- Database Management --- Physical Design
                 (H.2.2): {\bf Access methods}; Information Systems ---
                 Database Management (H.2)",
}

@Article{Lorie:1977:PIL,
  author =       "Raymond A. Lorie",
  title =        "Physical Integrity in a Large Segmented Database",
  journal =      j-TODS,
  volume =       "2",
  number =       "1",
  pages =        "91--104",
  month =        mar,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-1/p91-lorie/p91-lorie.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-1/p91-lorie/",
  abstract =     "A database system can generally be divided into three
                 major components. One component supports the logical
                 database as seen by the user. Another component maps
                 the information into physical records. The third
                 component, called the storage component, is responsible
                 for mapping these records onto auxiliary storage
                 (generally disks) and controlling their transfer to and
                 from main storage.\par

                 This paper is primarily concerned with the
                 implementation of a storage component. It considers a
                 simple and classical interface to the storage
                 component: Seen at this level the database is a
                 collection of segments. Each segment is a linear
                 address space.\par

                 A recovery scheme is first proposed for system failure
                 (hardware or software error which causes the contents
                 of main storage to be lost). It is based on maintaining
                 a dual mapping between pages and their location on
                 disk. One mapping represents the current state of a
                 segment being modified; the other represents a previous
                 backup state. At any time the backup state can be
                 replaced by the current state without any data merging.
                 Procedures for segment modification, save, and restore
                 are analyzed. Another section proposes a facility for
                 protection against damage to the auxiliary storage
                 itself. It is shown how such protection can be obtained
                 by copying on a tape (checkpoint) only those pages that
                 have been modified since the last checkpoint.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "checkpoint-restart; database; recovery; storage
                 management",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0): {\bf Security, integrity, and
                 protection**}; Information Systems --- Information
                 Storage and Retrieval --- Information Storage (H.3.2);
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart}",
}

@Article{Smith:1977:DAA,
  author =       "John Miles Smith and Diane C. P. Smith",
  title =        "Database abstractions: Aggregation and
                 Generalization",
  journal =      j-TODS,
  volume =       "2",
  number =       "2",
  pages =        "105--133",
  month =        jun,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Object/Nierstrasz.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-2/p105-smith/p105-smith.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-2/p105-smith/",
  abstract =     "Two kinds of abstraction that are fundamentally
                 important in database design and usage are defined.
                 Aggregation is an abstraction which turns a
                 relationship between objects into an aggregate object.
                 Generalization is an abstraction which turns a class of
                 objects into a generic object. It is suggested that all
                 objects (individual, aggregate, generic) should be
                 given uniform treatment in models of the real world. A
                 new data type, called generic, is developed as a
                 primitive for defining such models. Models defined with
                 this primitive are structured as a set of aggregation
                 hierarchies intersecting with a set of generalization
                 hierarchies. Abstract objects occur at the points of
                 intersection. This high level structure provides a
                 discipline for the organization of relational
                 databases. In particular this discipline allows: (i) an
                 important class of views to be integrated and
                 maintained; (ii) stability of data and programs under
                 certain evolutionary changes; (iii) easier
                 understanding of complex models and more natural {\em
                 query formulation;\/} (iv) {\em a more systematic
                 approach to database design;\/} (v) {\em more
                 optimization\/} to be performed at lower implementation
                 levels. The generic type is formalized by a set of
                 invariant properties. These properties should be
                 satisfied by all relations in a database if
                 abstractions are to be preserved. A triggering
                 mechanism for automatically maintaining these
                 invariants during update operations is proposed. A
                 simple mapping of aggregation/generalization
                 hierarchies onto owner-coupled set structures is
                 given.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "aggregation; data abstraction; data model; data type;
                 database design; dblit data abstraction;
                 generalization; integrity constraints; knowledge
                 representation; relational database",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Relational databases}; Software --- Software
                 Engineering --- Software Architectures (D.2.11): {\bf
                 Data abstraction}",
}

@Article{Shu:1977:EDE,
  author =       "N. C. Shu and B. C. Housel and R. W. Taylor and S. P.
                 Ghosh and V. Y. Lum",
  title =        "{EXPRESS}: a data {EXtraction, Processing, and
                 Restructuring System}",
  journal =      j-TODS,
  volume =       "2",
  number =       "2",
  pages =        "134--174",
  month =        jun,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-2/p134-shu/p134-shu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-2/p134-shu/",
  abstract =     "EXPRESS is an experimental prototype data translation
                 system which can access a wide variety of data and
                 restructure it for new uses. The system is driven by
                 two very high level nonprocedural languages: DEFINE for
                 data description and CONVERT for data restructuring.
                 Program generation and cooperating process techniques
                 are used to achieve efficient operation.\par

                 This paper describes the design and implementation of
                 EXPRESS. DEFINE and CONVERT are summarized and the
                 implementation architecture presented.\par

                 The DEFINE description is compiled into a customized
                 PL/1 program for accessing source data. The
                 restructuring specified in CONVERT is compiled into a
                 set of customized PL/1 procedures to derive multiple
                 target files from multiple input files. Job steps and
                 job control statements are generated automatically.
                 During execution, the generated procedures run under
                 control of a process supervisor, which coordinates
                 buffer management and handles file allocation,
                 deallocation, and all input/output requests.\par

                 The architecture of EXPRESS allows efficiency in
                 execution by avoiding unnecessary secondary storage
                 references while at the same time allowing the
                 individual procedures to be independent of each other.
                 Its modular structure permits the system to be extended
                 or transferred to another environment easily.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data conversion; data description languages; data
                 manipulation languages; data restructuring; data
                 translation; file conversion; program generation; very
                 high level languages",
  subject =      "Information Systems --- Database Management ---
                 Heterogeneous Databases (H.2.5): {\bf Data
                 translation**}; Information Systems --- Information
                 Storage and Retrieval --- Information Storage (H.3.2):
                 {\bf File organization}; Information Systems ---
                 Database Management --- Languages (H.2.3)",
}

@Article{Ozkarahan:1977:PER,
  author =       "E. A. Ozkarahan and S. A. Schuster and K. C. Sevcik",
  title =        "Performance Evaluation of a Relational Associative
                 Processor",
  journal =      j-TODS,
  volume =       "2",
  number =       "2",
  pages =        "175--195",
  month =        jun,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-2/p175-ozkarahan/p175-ozkarahan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-2/p175-ozkarahan/",
  abstract =     "An associative processor called RAP has been designed
                 to provide hardware support for the use and
                 manipulation of databases. RAP is particularly suited
                 for supporting relational databases. In this paper, the
                 relational operations provided by the RAP hardware are
                 described, and a representative approach to providing
                 the same relational operations with conventional
                 software and hardware is devised. Analytic models are
                 constructed for RAP and the conventional system. The
                 execution times of several of the operations are shown
                 to be vastly improved with RAP for large relations.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative processors; database machines; performance
                 evaluation; RAP hardware support database machine TODS;
                 relational databases",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}; Hardware
                 --- Control Structures and Microprogramming --- Control
                 Structure Performance Analysis and Design Aids
                 (B.1.2)",
}

@Article{Brice:1977:EPD,
  author =       "Richard S. Brice and Stephen W. Sherman",
  title =        "An Extension on the Performance of a Database Manager
                 in a Virtual Memory System Using Partially Locked
                 Virtual Buffers",
  journal =      j-TODS,
  volume =       "2",
  number =       "2",
  pages =        "196--207",
  month =        jun,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-2/p196-brice/p196-brice.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-2/p196-brice/",
  abstract =     "Buffer pools are created and managed in database
                 systems in order to reduce the total number of accesses
                 to the I/O devices. In systems using virtual memory,
                 any reduction in I/O accesses may be accompanied by an
                 increase in paging. The effects of these factors on
                 system performance are quantified, where system
                 performance is a function of page faults and database
                 accesses to the I/O devices. A previous study of this
                 phenomenon is extended through the analysis of
                 empirical data gathered in a multifactor experiment. In
                 this study memory is partitioned between the program
                 and the buffer so that the impact of the controlled
                 factors can be more effectively evaluated. It is
                 possible to improve system performance through the use
                 of different paging algorithms in the program partition
                 and the buffer partition. Also, the effects on system
                 performance as the virtual buffer size is increased
                 beyond the real memory allocated to the buffer
                 partition are investigated.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "buffer manager; database management; double paging;
                 locked buffer; page faults; page replacement algorithm;
                 performance; pinning fixing TODS; virtual buffer;
                 virtual memory",
  subject =      "Hardware --- Control Structures and Microprogramming
                 --- Control Structure Performance Analysis and Design
                 Aids (B.1.2); Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Database
                 Manager}",
}

@Article{Lohman:1977:OPB,
  author =       "Guy M. Lohman and John A. Muckstadt",
  title =        "Optimal Policy for Batch Operations: Backup,
                 Checkpointing, Reorganization, and Updating",
  journal =      j-TODS,
  volume =       "2",
  number =       "3",
  pages =        "209--222",
  month =        sep,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-3/p209-lohman/p209-lohman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-3/p209-lohman/",
  abstract =     "Many database maintenance operations are performed
                 periodically in batches, even in realtime systems. The
                 purpose of this paper is to present a general model for
                 determining the optimal frequency of these batch
                 operations. Specifically, optimal backup,
                 checkpointing, batch updating, and reorganization
                 policies are derived. The approach used exploits
                 inventory parallels by seeking the optimal number of
                 items--rather than a time interval--to trigger a batch.
                 The Renewal Reward Theorem is used to find the average
                 long run costs for backup, recovery, and item storage,
                 per unit time, which is then minimized to find the
                 optimal backup policy. This approach permits far less
                 restrictive assumptions about the update arrival
                 process than did previous models, as well as inclusion
                 of storage costs for the updates. The optimal
                 checkpointing, batch updating, and reorganization
                 policies are shown to be special cases of this optimal
                 backup policy. The derivation of previous results as
                 special cases of this model, and an example,
                 demonstrate the generality of the methodology
                 developed.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "backup frequency; batch operations; batch update;
                 checkpoint interval; data base systems; database
                 maintenance; file reorganization; inventory theory;
                 real-time systems; renewal theory",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0)",
}

@Article{Wong:1977:IHT,
  author =       "Kai C. Wong and Murray Edelberg",
  title =        "Interval Hierarchies and Their Application to
                 Predicate Files",
  journal =      j-TODS,
  volume =       "2",
  number =       "3",
  pages =        "223--232",
  month =        sep,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-3/p223-wong/p223-wong.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-3/p223-wong/",
  abstract =     "Predicates are used extensively in modern database
                 systems for purposes ranging from user specification of
                 associative accesses to data, to user-invisible system
                 control functions such as concurrency control and data
                 distribution. Collections of predicates, or predicate
                 files, must be maintained and accessed efficiently. A
                 dynamic index is described, called an interval
                 hierarchy, which supports several important retrieval
                 operations on files of simple conjunctive predicates.
                 Search and maintenance algorithms for interval
                 hierarchies are given. For a file of n predicates,
                 typical of the kind expected in practice, these
                 algorithms require time equal to $ O(\log n) $.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; data base systems; database
                 system; distributed data; index; interval; predicate
                 file",
  subject =      "Software --- Operating Systems --- Storage Management
                 (D.4.2): {\bf Storage hierarchies}; Information Systems
                 --- Information Storage and Retrieval --- Information
                 Storage (H.3.2): {\bf File organization}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Distributed databases}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Concurrency}; Information Systems --- Information
                 Storage and Retrieval --- Content Analysis and Indexing
                 (H.3.1): {\bf Indexing methods}",
}

@Article{Ries:1977:ELG,
  author =       "Daniel R. Ries and Michael Stonebraker",
  title =        "Effects of Locking Granularity in a Database
                 Management System",
  journal =      j-TODS,
  volume =       "2",
  number =       "3",
  pages =        "233--246",
  month =        sep,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-3/p233-ries/p233-ries.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-3/p233-ries/",
  abstract =     "Many database systems guarantee some form of integrity
                 control upon multiple concurrent updates by some form
                 of locking. Some ``granule'' of the database is chosen
                 as the unit which is individually locked, and a lock
                 management algorithm is used to ensure integrity. Using
                 a simulation model, this paper explores the desired
                 size of a granule. Under a wide variety of seemingly
                 realistic conditions, surprisingly coarse granularity
                 is called for. The paper concludes with some
                 implications of these results concerning the viability
                 of so-called predicate locking.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency; consistency; data base systems; database
                 management; locking granularity; multiple updates;
                 predicate locks",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management (H.2); Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Concurrency}",
}

@Article{Schmidt:1977:SHL,
  author =       "Joachim W. Schmidt",
  title =        "Some High Level Language Constructs for Data of Type
                 Relation",
  journal =      j-TODS,
  volume =       "2",
  number =       "3",
  pages =        "247--261",
  month =        sep,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-3/p247-schmidt/p247-schmidt.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-3/p247-schmidt/",
  abstract =     "For the extension of high level languages by data
                 types of mode relation, three language constructs are
                 proposed and discussed: a repetition statement
                 controlled by relations, predicates as a generalization
                 of Boolean expressions, and a constructor for relations
                 using predicates. The language constructs are developed
                 step by step starting with a set of elementary
                 operations on relations. They are designed to fit into
                 PASCAL without introducing too many additional
                 concepts.",
  acknowledgement = ack-nhfb,
  annote =       "PASCAL/R",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; data type; database;
                 high level language; language extension; nonprocedural
                 language; relational calculus; relational model",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management ---
                 Languages (H.2.3)",
}

@Article{Fagin:1977:MVD,
  author =       "Ronald Fagin",
  title =        "Multi-Valued Dependencies and a New Normal Form for
                 Relational Databases",
  journal =      j-TODS,
  volume =       "2",
  number =       "3",
  pages =        "262--278",
  month =        sep,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-3/p262-fagin/p262-fagin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-3/p262-fagin/",
  abstract =     "A new type of dependency, which includes the
                 well-known functional dependencies as a special case,
                 is defined for relational databases. By using this
                 concept, a new (``fourth'') normal form for relation
                 schemata is defined. This fourth normal form is
                 strictly stronger than Codd's ``improved third normal
                 form'' (or ``Boyce-Codd normal form''). It is shown
                 that every relation schema can be decomposed into a
                 family of relation schemata in fourth normal form
                 without loss of information (that is, the original
                 relation can be obtained from the new relations by
                 taking joins).",
  acknowledgement = ack-nhfb,
  annote =       "Multivalued dependency is defined for relational
                 databases, a new (``fourth'') normal form is strictly
                 stronger than Codd's.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "3NF; 4NF; Boyce-Codd normal form; data base systems;
                 database design; decomposition; fourth normal form;
                 functional dependency; multivalued dependency;
                 normalization; relational database; third normal form",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Normal forms}",
}

@Article{March:1977:DER,
  author =       "Salvatore T. March and Dennis G. Severance",
  title =        "The Determination of Efficient Record Segmentations
                 and Blocking Factors for Shared Data Files",
  journal =      j-TODS,
  volume =       "2",
  number =       "3",
  pages =        "279--296",
  month =        sep,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-3/p279-march/p279-march.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-3/p279-march/",
  abstract =     "It is generally believed that 80 percent of all
                 retrieval from a commercial database is directed at
                 only 20 percent of the stored data items. By
                 partitioning data items into primary and secondary
                 record segments, storing them in physically separate
                 files, and judiciously allocating available buffer
                 space to the two files, it is possible to significantly
                 reduce the average cost of information retrieval from a
                 shared database. An analytic model, based upon
                 knowledge of data item lengths, data access costs, and
                 user retrieval patterns, is developed to assist an
                 analyst with this assignment problem. A computationally
                 tractable design algorithm is presented and results of
                 its application are described.",
  acknowledgement = ack-nhfb,
  classification = "723; 901",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "bicriterion mathematical programs; branch and bound;
                 buffer allocation; data base systems; data management;
                 information science --- information retrieval; network
                 flows; record design; record segmentation",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Ozkarahan:1977:AAF,
  author =       "E. A. Ozkarahan and K. C. Sevcik",
  title =        "Analysis of Architectural Features for Enhancing the
                 Performance of a Database Machine",
  journal =      j-TODS,
  volume =       "2",
  number =       "4",
  pages =        "297--316",
  month =        dec,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-4/p297-ozkarahan/p297-ozkarahan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-4/p297-ozkarahan/",
  abstract =     "RAP (Relational Associative Processor) is a
                 ``back-end'' database processor that is intended to
                 take over much of the effort of database management in
                 a computer system. In order to enhance RAP's
                 performance its design includes mechanisms for
                 permitting features analogous to multiprogramming and
                 virtual memory as in general purpose computer systems.
                 It is the purpose of this paper to present the detailed
                 design of these mechanisms, along with some analysis
                 that supports their value. Specifically, (1) the
                 response time provided by RAP under several scheduling
                 disciplines involving priority by class is analyzed,
                 (2) the cost effectiveness of the additional hardware
                 in RAP necessary to support multiprogramming is
                 assessed, and (3) a detailed design of the RAP virtual
                 memory system and its monitor is presented.",
  acknowledgement = ack-nhfb,
  annote =       "RAP (Relational Associative Processor) is a ``back-end
                 database processor''; its design includes mechanisms
                 for multiprogramming and virtual memory.",
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative processors; computer architecture;
                 computer architecture, hardware support TODS; data base
                 systems; database machines; database management",
  subject =      "Information Systems --- Database Management (H.2)",
}

@Article{Rissanen:1977:ICR,
  author =       "Jorma Rissanen",
  title =        "Independent Components of Relations",
  journal =      j-TODS,
  volume =       "2",
  number =       "4",
  pages =        "317--325",
  month =        dec,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-4/p317-rissanen/p317-rissanen.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-4/p317-rissanen/",
  abstract =     "In a multiattribute relation or, equivalently, a
                 multicolumn table a certain collection of the
                 projections can be shown to be independent in much the
                 same way as the factors in a Cartesian product or
                 orthogonal components of a vector. A precise notion of
                 independence for relations is defined and studied. The
                 main result states that the operator which reconstructs
                 the original relation from its independent components
                 is the natural join, and that independent components
                 split the full family of functional dependencies into
                 corresponding component families. These give an
                 easy-to-check criterion for independence.",
  acknowledgement = ack-nhfb,
  annote =       "In a multi-attribute relation a certain collection of
                 projections can be shown to be independent. The
                 operator which reconstructs the original relation is
                 the natural join. Independent components split the full
                 family of functional dependencies into corresponding
                 component families.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database; functional dependencies;
                 relations",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Bonczek:1977:TGB,
  author =       "Robert H. Bonczek and James I. Cash and Andrew B.
                 Whinston",
  title =        "A Transformational Grammar-Based Query Processor for
                 Access Control in a Planning System",
  journal =      j-TODS,
  volume =       "2",
  number =       "4",
  pages =        "326--338",
  month =        dec,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-4/p326-bonczek/p326-bonczek.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-4/p326-bonczek/",
  abstract =     "Providing computer facilities and data availability to
                 larger numbers of users generates increased system
                 vulnerability which is partially offset by software
                 security systems. Much too often these systems are
                 presented as ad hoc additions to the basic data
                 management system. One very important constituent of
                 software security systems is the access control
                 mechanism which may be the last resource available to
                 prohibit unauthorized data retrieval. This paper
                 presents a specification for an access control
                 mechanism. The mechanism is specified in a context for
                 use with the GPLAN decision support system by a
                 theoretical description consistent with the formal
                 definition of GPLAN's query language. Incorporation of
                 the mechanism into the language guarantees it will not
                 be an ad hoc addition. Furthermore, it provides a
                 facile introduction of data security dictates into the
                 language processor.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access control; data processing; data security;
                 database; decision support system; planning system",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- General (H.2.0):
                 {\bf Security, integrity, and protection**};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods}",
}

@Article{Lang:1977:DBP,
  author =       "Tom{\'a}s Lang and Christopher Wood and Eduardo B.
                 Fern{\'a}ndez",
  title =        "Database Buffer Paging in Virtual Storage Systems",
  journal =      j-TODS,
  volume =       "2",
  number =       "4",
  pages =        "339--351",
  month =        dec,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-4/p339-lang/p339-lang.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-4/p339-lang/",
  abstract =     "Three models, corresponding to different sets of
                 assumptions, are analyzed to study the behavior of a
                 database buffer in a paging environment. The models
                 correspond to practical situations and vary in their
                 search strategies and replacement algorithms. The
                 variation of I/O cost with respect to buffer size is
                 determined for the three models. The analysis is valid
                 for arbitrary database and buffer sizes, and the I/O
                 cost is obtained in terms of the miss ratio, the buffer
                 size, the number of main memory pages available for the
                 buffer, and the relative buffer and database access
                 costs.",
  acknowledgement = ack-nhfb,
  annote =       "The variation of I/O cost with respect to buffer size
                 is determined for three models: the IMS/360 database
                 buffer, with LRU memory replacement, and a prefix table
                 in main memory indicating which database pages are in
                 the VSAM buffer.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "buffer management; computer systems performance; data
                 base systems; database performance; page replacement
                 algorithm; virtual memory",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0); Information Systems --- Database
                 Management --- Systems (H.2.4)",
}

@Article{Thomas:1977:VAP,
  author =       "D. A. Thomas and B. Pagurek and R. J. Buhr",
  title =        "Validation Algorithms for Pointer Values in {DBTG}
                 Databases",
  journal =      j-TODS,
  volume =       "2",
  number =       "4",
  pages =        "352--369",
  month =        dec,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-4/p352-thomas/p352-thomas.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-4/p352-thomas/",
  abstract =     "This paper develops algorithms for verifying pointer
                 values in DBTG (Data Base Task Group) type databases.
                 To validate pointer implemented access paths and set
                 structures, two algorithms are developed. The first
                 procedure exploits the ``typed pointer'' concept
                 employed in modern programming languages to diagnose
                 abnormalities in directories and set instances. The
                 second algorithm completes pointer validation by
                 examining set instances to ensure that each DBTG set
                 has a unique owner. Sequential processing is used by
                 both algorithms, allowing a straightforward
                 implementation which is efficient in both time and
                 space. As presented, the algorithms are independent of
                 implementation schema and physical structure.",
  acknowledgement = ack-nhfb,
  annote =       "Type Checking algorithm detects and locates errors in
                 the pointers which are used to represent chained and
                 pointer array implemented sets. In addition to invalid
                 set pointers, the algorithm has been extended to check
                 index sequential and inverted access directories
                 provided by EDMS.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database integrity; database
                 utilities; type checking; validation",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Database Management --- General
                 (H.2.0): {\bf Security, integrity, and protection**}",
}

@Article{Claybrook:1977:FDM,
  author =       "Billy G. Claybrook",
  title =        "A Facility for Defining and Manipulating Generalized
                 Data Structures",
  journal =      j-TODS,
  volume =       "2",
  number =       "4",
  pages =        "370--406",
  month =        dec,
  year =         "1977",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1977-2-4/p370-claybrook/p370-claybrook.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1977-2-4/p370-claybrook/",
  abstract =     "A data structure definition facility (DSDF) is
                 described that provides definitions for several
                 primitive data types, homogeneous and heterogeneous
                 arrays, cells, stacks, queues, trees, and general
                 lists. Each nonprimitive data structure consists of two
                 separate entities--a head and a body. The head contains
                 the entry point(s) to the body of the structure; by
                 treating the head like a cell, the DSDF operations are
                 capable of creating and manipulating very general data
                 structures. A template structure is described that
                 permits data structures to share templates.\par

                 The primary objectives of the DSDF are: (1) to develop
                 a definition facility that permits the programmer to
                 explicitly define and manipulate generalized data
                 structures in a consistent manner, (2) to detect
                 mistakes and prevent the programmer from creating
                 (either inadvertently or intentionally) undesirable (or
                 illegal) data structures, (3) to provide a syntactic
                 construction mechanism that separates the
                 implementation of a data structure from its use in the
                 program in which it is defined, and (4) to facilitate
                 the development of reliable software.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data definition languages; data processing; data
                 structure definition facility; data structures;
                 database management",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Database Management ---
                 Languages (H.2.3)",
}

@Article{Minker:1978:SSS,
  author =       "Jack Minker",
  title =        "Search Strategy and Selection Function for an
                 Inferential Relational System",
  journal =      j-TODS,
  volume =       "3",
  number =       "1",
  pages =        "1--31",
  month =        mar,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-1/p1-minker/p1-minker.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-1/p1-minker/",
  abstract =     "An inferential relational system is one in which data
                 in the system consists of both explicit facts and
                 general axioms (or ``views''). The general axioms are
                 used together with the explicit facts to derive the
                 facts that are implicit (virtual relations) within the
                 system. A top-down algorithm, as used in artificial
                 intelligence work, is described to develop inferences
                 within the system. The top-down approach starts with
                 the query, a conjunction of relations, to be answered.
                 Either a relational fact solves a given relation in a
                 conjunct, or the relation is replaced by a conjunct of
                 relations which must be solved to solve the given
                 relation. The approach requires that one and only one
                 relation in a conjunction be replaced (or expanded) by
                 the given facts and general axioms. The decision to
                 expand only a single relation is termed a selection
                 function. It is shown for relational systems that such
                 a restriction still guarantees that a solution to the
                 problem will be found if one exists.\par

                 The algorithm provides for heuristic direction in the
                 search process. Experimental results are presented
                 which illustrate the techniques. A bookkeeping
                 mechanism is described which permits one to know when
                 subproblems are solved. It further facilitates the
                 outputting of reasons for the deductively found answer
                 in a coherent fashion.",
  acknowledgement = ack-nhfb,
  annote =       "Data in the system consists of both explicit facts and
                 general axioms. The top-down approach starts with the
                 query, a conjunction of relations, to be answered.
                 Either a relational fact solves a given relation in a
                 conjunct, or the relation is replaced by a conjunct of
                 relations which must be solved to solve the given
                 relation. Experimental results are presented which
                 illustrate the techniques.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "answer and reason extraction; data base systems;
                 heuristics; inference mechanism; logic; predicate
                 calculus; relational databases; search strategy;
                 selection function; top-down search; virtual
                 relations",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Search process}",
}

@Article{Tuel:1978:ORP,
  author =       "William G. {Tuel, Jr.}",
  title =        "Optimum Reorganization Points for Linearly Growing
                 Files",
  journal =      j-TODS,
  volume =       "3",
  number =       "1",
  pages =        "32--40",
  month =        mar,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-1/p32-tuel/p32-tuel.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-1/p32-tuel/",
  abstract =     "The problem of finding optimal reorganization
                 intervals for linearly growing files is solved. An
                 approximate reorganization policy, independent of file
                 lifetime, is obtained. Both the optimum and approximate
                 policies are compared to previously published results
                 using a numerical example.",
  acknowledgement = ack-nhfb,
  annote =       "The problem of finding optimal reorganization
                 intervals for linearly growing files is solved.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing --- file organization; database; file
                 organization; optimization; physical database design
                 TODS, data base systems; reorganization",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Yu:1978:END,
  author =       "C. T. Yu and W. S. Luk and M. K. Siu",
  title =        "On the Estimation of the Number of Desired Records
                 with Respect to a Given Query",
  journal =      j-TODS,
  volume =       "3",
  number =       "1",
  pages =        "41--56",
  month =        mar,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-1/p41-yu/p41-yu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-1/p41-yu/",
  abstract =     "The importance of the estimation of the number of
                 desired records for a given query is outlined. Two
                 algorithms for the estimation in the ``closest
                 neighbors problem'' are presented. The numbers of
                 operations of the algorithms are $ O(m \ell^2) $ and $
                 O(m \ell) $, where $m$ is the number of clusters and $
                 \ell $ is the ``length'' of the query.",
  acknowledgement = ack-nhfb,
  annote =       "Two algorithms for the estimation in the `closest
                 neighbors problem'",
  classification = "901",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "closest neighbors; database; estimate; information
                 science, CTYu selectivity TODS; query",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}",
}

@Article{Su:1978:CCS,
  author =       "Stanley Y. W. Su and Ahmed Emam",
  title =        "{CASDAL}: {{\em CAS\/}SM}'s {{\em DA\/}}ta {{\em
                 L\/}}anguage",
  journal =      j-TODS,
  volume =       "3",
  number =       "1",
  pages =        "57--91",
  month =        mar,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/citations/journals/tods/1978-3-1/p57-su/",
  abstract =     "CASDAL is a high level data language designed and
                 implemented for the database machine CASSM. The
                 language is used for the manipulation and maintenance
                 of a database using an unnormalized (hierarchically
                 structured) relational data model. It also has
                 facilities to define, modify, and maintain the data
                 model definition. The uniqueness of CASDAL lies in its
                 power to specify complex operations in terms of several
                 new language constructs and its concepts of tagging or
                 marking tuples and of matching values when walking from
                 relation to relation. The language is a result of a
                 top-down design and development effort for a database
                 machine in which high level language constructs are
                 directly supported by the hardware. This paper (1)
                 gives justifications for the use of an unnormalized
                 relational model on which the language is based, (2)
                 presents the CASDAL language constructs with examples,
                 and (3) describes CASSM's architecture and hardware
                 primitives which match closely with the high level
                 language constructs and facilitate the translation
                 process. This paper also attempts to show how the
                 efficiency of the language and the translation task can
                 be achieved and simplified in a system in which the
                 language is the result of a top-down system design and
                 development.",
  acknowledgement = ack-nhfb,
  annote =       "CASDAL is a high level data language for the database
                 machine CASSM. It uses an unnormalized (hierarchically
                 structured) relational data model. This paper (1)
                 justifies the use of this model, (2) presents the CASDAL
                 language constructs with examples, and (3) describes
                 CASSM's architecture.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative memory; computer programming languages;
                 data language; database; nonprocedural language; query
                 language; relational model; SYWSu hardware support
                 database machine TODS, data base systems",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3); Information Systems --- Database
                 Management --- Languages (H.2.3): {\bf Query
                 languages}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Relational databases}",
}

@Article{Chin:1978:SSD,
  author =       "Francis Y. Chin",
  title =        "Security in Statistical Databases for Queries with
                 Small Counts",
  journal =      j-TODS,
  volume =       "3",
  number =       "1",
  pages =        "92--104",
  month =        mar,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-1/p92-chin/p92-chin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-1/p92-chin/",
  abstract =     "The security problem of statistical databases
                 containing anonymous but individual records which may
                 be evaluated by queries about sums and averages is
                 considered. A model, more realistic than the previous
                 ones, is proposed, in which nonexisting records for
                 some keys can be allowed. Under the assumption that the
                 system protects the individual's information by the
                 well-known technique which avoids publishing summaries
                 with small counts, several properties about the system
                 and a necessary and sufficient condition for
                 compromising the database have been derived. The
                 minimum number of queries needed to compromise the
                 database is also discussed.",
  acknowledgement = ack-nhfb,
  annote =       "Under the assumption that the system protects the
                 individual's information by the technique which avoids
                 publishing summaries with small counts, properties
                 about the system and a necessary and sufficient
                 condition for compromising the database have been
                 derived.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compromisability; data base systems; data processing
                 --- security of data; data security; protection;
                 statistical databases",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Query processing};
                 Information Systems --- Database Management ---
                 Database Administration (H.2.7): {\bf Security,
                 integrity, and protection}",
}

@Article{Hendrix:1978:DNL,
  author =       "Gary G. Hendrix and Earl D. Sacerdoti and Daniel
                 Sagalowicz and Jonathan Slocum",
  title =        "Developing a Natural Language Interface to Complex
                 Data",
  journal =      j-TODS,
  volume =       "3",
  number =       "2",
  pages =        "105--147",
  month =        jun,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/Ai.misc.bib; Compendex database;
                 Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-2/p105-hendrix/p105-hendrix.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-2/p105-hendrix/",
  abstract =     "Aspects of an intelligent interface that provides
                 natural language access to a large body of data
                 distributed over a computer network are described. The
                 overall system architecture is presented, showing how a
                 user is buffered from the actual database management
                 systems (DBMSs) by three layers of insulating
                 components. These layers operate in series to convert
                 natural language queries into calls to DBMSs at remote
                 sites. Attention is then focused on the first of the
                 insulating components, the natural language system. A
                 pragmatic approach to language access that has proved
                 useful for building interfaces to databases is
                 described and illustrated by examples. Special language
                 features that increase system usability, such as
                 spelling correction, processing of incomplete inputs,
                 and run-time system personalization, are also
                 discussed. The language system is contrasted with other
                 work in applied natural language processing, and the
                 system's limitations are analyzed.",
  acknowledgement = ack-nhfb,
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database access; human engineering;
                 intelligent access semantic grammar human engineering
                 run-time personalization, computer interfaces;
                 intelligent interface; natural language; Natural
                 Language, Intelligent Interface, Database Access,
                 Semantic Grammar, Human Engineering, Runtime
                 Personalization; run-time personalization; semantic
                 grammar",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3); Information Systems --- Database
                 Management --- Physical Design (H.2.2): {\bf Access
                 methods}",
}

@Article{Langdon:1978:NAP,
  author =       "Glen G. {Langdon, Jr.}",
  title =        "A Note on Associative Processors for Data Management",
  journal =      j-TODS,
  volume =       "3",
  number =       "2",
  pages =        "148--158",
  month =        jun,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-2/p148-langdon/p148-langdon.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-2/p148-langdon/",
  abstract =     "Associative ``logic-per-track'' processors for data
                 management are examined from a technological and
                 engineering point of view. Architectural and design
                 decisions are discussed. Some alternatives to the
                 design of comparators, garbage collection, and domain
                 extraction for architectures like the Relational
                 Associative Processor (RAP) are offered.",
  acknowledgement = ack-nhfb,
  annote =       "Associative ``logic-per-track'' processors for data
                 management are examined from a technological and
                 engineering point of view (RAP).",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative processors; computer operating systems;
                 data base systems, hardware support database machine
                 TODS; database machines",
  subject =      "Information Systems --- Database Management (H.2)",
}

@Article{Kluge:1978:DFM,
  author =       "Werner E. Kluge",
  title =        "Data File Management in Shift-Register Memories",
  journal =      j-TODS,
  volume =       "3",
  number =       "2",
  pages =        "159--177",
  month =        jun,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-2/p159-kluge/p159-kluge.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-2/p159-kluge/",
  abstract =     "The paper proposes a shift-register memory, structured
                 as a two-dimensional array of uniform shift-register
                 loops which are linked by flow-steering switches, whose
                 switch control scheme is tailored to perform with great
                 efficiency data management operations on sequentially
                 organized files. The memory operates in a linear
                 input/output mode to perform record insertion,
                 deletion, and relocation on an existing file, and in a
                 sublinear mode for rapid internal file movement to
                 expedite file positioning and record retrieval and
                 update operations.\par

                 The memory, implemented as a large capacity
                 charge-coupled device or magnetic domain memory,
                 permits efficient data management on very large
                 databases at the level of secondary storage and lends
                 itself to applications as a universal disk replacement,
                 particularly in database computers.",
  acknowledgement = ack-nhfb,
  annote =       "Shift-register memory, structured as a two-dimensional
                 array tailored to perform with great efficiency data
                 management operations on sequentially organized files",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data transformations; deletion; hardware support
                 database machine TODS, computer operating systems;
                 insertion; LIFO/FIFO operation modes; management of
                 sequentially organized files; record retrieval;
                 relocation; shift-register memories; updating",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3)",
}

@Article{Rosenkrantz:1978:SLC,
  author =       "David J. Rosenkrantz and Richard E. Stearns and Philip
                 M. {Lewis, II}",
  title =        "System Level Concurrency Control for Distributed
                 Database Systems",
  journal =      j-TODS,
  volume =       "3",
  number =       "2",
  pages =        "178--198",
  month =        jun,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-2/p178-rosenkrantz/p178-rosenkrantz.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-2/p178-rosenkrantz/",
  abstract =     "A distributed database system is one in which the
                 database is spread among several sites and application
                 programs ``move'' from site to site to access and
                 update the data they need. The concurrency control is
                 that portion of the system that responds to the read
                 and write requests of the application programs. Its job
                 is to maintain the global consistency of the
                 distributed database while ensuring that the
                 termination of the application programs is not
                 prevented by phenomena such as deadlock. We assume each
                 individual site has its own local concurrency control
                 which responds to requests at that site and can only
                 communicate with concurrency controls at other sites
                 when an application program moves from site to site,
                 terminates, or aborts.\par

                 This paper presents designs for several distributed
                 concurrency controls and demonstrates that they work
                 correctly. It also investigates some of the
                 implications of global consistency of a distributed
                 database and discusses phenomena that can prevent
                 termination of application programs.",
  acknowledgement = ack-nhfb,
  annote =       "Later arriving transactions may be aborted if not yet
                 in the commit stage.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency; consistency; data base systems; database;
                 deadlock; deadly embrace; distributed; integrity; lock;
                 readers and writers; restart; rollback; transaction",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}",
}

@Article{Delobel:1978:NHD,
  author =       "Claude Delobel",
  title =        "Normalization and Hierarchical Dependencies in the
                 Relational Data Model",
  journal =      j-TODS,
  volume =       "3",
  number =       "3",
  pages =        "201--222",
  month =        sep,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-3/p201-delobel/p201-delobel.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-3/p201-delobel/",
  abstract =     "The purpose of this paper is to present a new approach
                 to the conceptual design of logical schemata for
                 relational databases. One-to-one, one-to-many, and
                 many-to-many relationships between the attributes of
                 database relations are modeled by means of functional
                 dependencies and multivalued dependencies. A new type
                 of dependency is introduced: first-order hierarchical
                 decomposition. The properties of this new type of
                 dependency are studied and related to the normalization
                 process of relations. The relationship between the
                 concept of first-order hierarchical decomposition and
                 the notion of hierarchical organization of data is
                 discussed through the normalization process.",
  acknowledgement = ack-nhfb,
  annote =       "One-to-one, one-to-many relationships between the
                 attributes of database relations are modeled by means
                 of functional dependencies and multivalued
                 dependencies. A new type of dependency is first-order
                 hierarchical.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; data model; first-order
                 hierarchical dependency; functional dependency;
                 hierarchical schema; multivalued dependency;
                 normalization process; relational database; relational
                 model",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems
                 --- Database Management --- Logical Design (H.2.1):
                 {\bf Schema and subschema}",
}

@Article{Smith:1978:SPD,
  author =       "Alan Jay Smith",
  title =        "Sequentiality and Prefetching in Database Systems",
  journal =      j-TODS,
  volume =       "3",
  number =       "3",
  pages =        "223--247",
  month =        sep,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-3/p223-smith/p223-smith.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-3/p223-smith/",
  abstract =     "Sequentiality of access is an inherent characteristic
                 of many database systems. We use this observation to
                 develop an algorithm which selectively prefetches data
                 blocks ahead of the point of reference. The number of
                 blocks prefetched is chosen by using the empirical run
                 length distribution and conditioning on the observed
                 number of sequential block references immediately
                 preceding reference to the current block. The optimal
                 number of blocks to prefetch is estimated as a function
                 of a number of ``costs,'' including the cost of
                 accessing a block not resident in the buffer (a miss),
                 the cost of fetching additional data blocks at fault
                 times, and the cost of fetching blocks that are never
                 referenced. We estimate this latter cost, described as
                 memory pollution, in two ways. We consider the
                 treatment (in the replacement algorithm) of prefetched
                 blocks, whether they are treated as referenced or not,
                 and find that it makes very little difference. Trace
                 data taken from an operational IMS database system is
                 analyzed and the results are presented. We show how to
                 determine optimal block sizes. We find that
                 anticipatory fetching of data can lead to significant
                 improvements in system operation.",
  acknowledgement = ack-nhfb,
  annote =       "An algorithm which selectively prefetches data blocks
                 ahead of the point of reference. The optimal number of
                 blocks to prefetch is estimated as a function of several
                 costs: the cost of accessing a block not resident in
                 the buffer (a miss), the cost of fetching additional
                 data blocks at fault times, and the cost of fetching
                 blocks that are never referenced.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "buffer management; database systems; dynamic
                 programming; IMS; paging; prefetching; read-ahead
                 caches caching buffer management TODS, data base
                 systems; sequentiality",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4)",
}

@Article{Schlageter:1978:PSD,
  author =       "Gunter Schlageter",
  title =        "Process Synchronization in Database Systems",
  journal =      j-TODS,
  volume =       "3",
  number =       "3",
  pages =        "248--271",
  month =        sep,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See errata report in \cite{Bernstein:1979:CPS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-3/p248-schlageter/p248-schlageter.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-3/p248-schlageter/",
  abstract =     "The problem of process synchronization in database
                 systems is analyzed in a strictly systematic way, on a
                 rather abstract level; the abstraction is chosen such
                 that the essential characteristics of the problem can
                 be distinctly modeled and investigated. Using a small
                 set of concepts, a consistent description of the whole
                 problem is developed; many widely used, but only
                 vaguely defined, notions are defined exactly within
                 this framework. The abstract treatment of the problem
                 immediately leads to practically useful insights with
                 respect to possible solutions, although
                 implementational aspects are not discussed in detail.",
  acknowledgement = ack-nhfb,
  annote =       "Process synchronization in database systems is
                 analyzed on a rather abstract level. [see Bernstein for
                 comments]",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database consistency; database systems; integrity;
                 locking; operating system support TODS, data base
                 systems; parallel process systems; process
                 synchronization",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management (H.2)",
}

@Article{Hollaar:1978:SMP,
  author =       "Lee A. Hollaar",
  title =        "Specialized Merge Processor Networks for Combining
                 Sorted Lists",
  journal =      j-TODS,
  volume =       "3",
  number =       "3",
  pages =        "272--284",
  month =        sep,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-3/p272-hollaar/p272-hollaar.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-3/p272-hollaar/",
  abstract =     "In inverted file database systems, index lists
                 consisting of pointers to items within the database are
                 combined to form a list of items which potentially
                 satisfy a user's query. This list merging is similar to
                 the common data processing operation of combining two
                 or more sorted input files to form a sorted output
                 file, and generally represents a large percentage of
                 the computer time used by the retrieval system.
                 Unfortunately, a general purpose digital computer is
                 better suited for complicated numeric processing rather
                 than the simple combining of data. The overhead of
                 adjusting and checking pointers, aligning data, and
                 testing for completion of the operation overwhelm the
                 processing of the data.\par

                 A specialized processor can perform most of these
                 overhead operations in parallel with the processing of
                 the data, thereby offering speed increases by a factor
                 from 10 to 100 over conventional computers, depending
                 on whether a higher speed memory is used for storing
                 the lists. These processors can also be combined into
                 networks capable of directly forming the result of a
                 complex expression, with another order of magnitude
                 speed increase possible. The programming and operation
                 of these processors and networks is discussed, and
                 comparisons are made with the speed and efficiency of
                 conventional general purpose computers.",
  acknowledgement = ack-nhfb,
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "backend processors; binary tree networks; computer
                 architecture --- program processors; computer system
                 architecture; full text retrieval systems; hardware
                 support database machine TODS, data base systems;
                 inverted file databases; nonnumeric processing;
                 pipelined networks; sorted list merging",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3);
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Haerder:1978:IGA,
  author =       "Theo Haerder",
  title =        "Implementing a Generalized Access Path Structure for a
                 Relational Database System",
  journal =      j-TODS,
  volume =       "3",
  number =       "3",
  pages =        "285--298",
  month =        sep,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-3/p285-haerder/p285-haerder.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-3/p285-haerder/",
  abstract =     "A new kind of implementation technique for access
                 paths connecting sets of tuples qualified by attribute
                 values is described. It combines the advantages of
                 pointer chain and multilevel index implementation
                 techniques. Compared to these structures the
                 generalized access path structure is at least
                 competitive in performing retrieval and update
                 operations, while a considerable storage space saving
                 is gained. Some additional features of this structure
                 support $m$-way joins and the evaluation of
                 multirelation queries, and allow efficient checks of
                 integrity assertions and simple reorganization
                 schemes.",
  acknowledgement = ack-nhfb,
  annote =       "Implementation technique for access paths connecting
                 sets of tuples qualified by attribute values combines
                 the advantages of pointer chains and multilevel
                 indexes. Features of this structure support m-way
                 joins.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems, Harder multi-relation indices
                 TODS",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Yu:1978:PP,
  author =       "C. T. Yu and M. K. Siu and K. Lam",
  title =        "On a Partitioning Problem",
  journal =      j-TODS,
  volume =       "3",
  number =       "3",
  pages =        "299--309",
  month =        sep,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-3/p299-yu/p299-yu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-3/p299-yu/",
  abstract =     "This paper investigates the problem of locating a set
                 of ``boundary points'' of a large number of records.
                 Conceptually, the boundary points partition the records
                 into subsets of roughly the same number of elements,
                 such that the key values of the records in one subset
                 are all smaller or all larger than those of the records
                 in another subset. We guess the locations of the
                 boundary points by linear interpolation and check their
                 accuracy by reading the key values of the records on
                 one pass. This process is repeated until all boundary
                 points are determined. Clearly, this problem can also
                 be solved by performing an external tape sort. Both
                 analytical and empirical results indicate that the
                 number of passes required is small in comparison with
                 that in an external tape sort. This kind of record
                 partitioning may be of interest in setting up a
                 statistical database system.",
  acknowledgement = ack-nhfb,
  annote =       "Boundary points partition the records into subsets of
                 roughly the same number of elements. We guess the
                 locations of the boundary points by linear
                 interpolation and check their accuracy by reading the
                 key values of the records on one pass. This process is
                 repeated until all boundary points are determined.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "CTYu TODS, data base systems; external sort; key
                 value; partition; passes; tape probability",
  subject =      "Computing Methodologies --- Image Processing And
                 Computer Vision --- Segmentation (I.4.6): {\bf Region
                 growing, partitioning}",
}

@Article{Fagin:1978:AM,
  author =       "Ronald Fagin",
  title =        "On an Authorization Mechanism",
  journal =      j-TODS,
  volume =       "3",
  number =       "3",
  pages =        "310--319",
  month =        sep,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-3/p310-fagin/p310-fagin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-3/p310-fagin/",
  abstract =     "Griffiths and Wade ({\em ACM Trans. Database Syst.
                 1,3}, (Sept. 1976), 242-255) have defined a dynamic
                 authorization mechanism that goes beyond the
                 traditional password approach. A database user can
                 grant or revoke privileges (such as to read, insert, or
                 delete) on a file that he has created. Furthermore, he
                 can authorize others to grant these same privileges.
                 The database management system keeps track of a
                 directed graph, emanating from the creator, of granted
                 privileges. The nodes of the graph correspond to users,
                 and the edges (each of which is labeled with a
                 timestamp) correspond to grants. The edges are of two
                 types, corresponding to whether or not the recipient of
                 the grant has been given the option to make further
                 grants of this privilege. Furthermore, for each pair $
                 A, B $ of nodes, there can be no more than one edge of
                 each type from $A$ to $B$. We modify this approach by
                 allowing graphs in which there can be multiple edges of
                 each type from one node to another. We prove
                 correctness (in a certain strong sense) for our
                 modified authorization mechanism. Further, we show by
                 example that under the original mechanism, the system
                 might forbid some user from exercising or granting a
                 privilege that he ``should'' be allowed to exercise or
                 grant.",
  acknowledgement = ack-nhfb,
  annote =       "We prove correctness for our modified authorization
                 mechanism",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access control; authorization; data base systems;
                 database; privacy; proof of correctness; protection;
                 revocation; security",
  subject =      "Information Systems --- Database Management ---
                 Database Administration (H.2.7): {\bf Security,
                 integrity, and protection}",
}

@Article{Salton:1978:GSC,
  author =       "G. Salton and A. Wong",
  title =        "Generation and Search of Clustered Files",
  journal =      j-TODS,
  volume =       "3",
  number =       "4",
  pages =        "321--346",
  month =        dec,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-4/p321-salton/p321-salton.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-4/p321-salton/",
  abstract =     "A classified, or clustered file is one where related,
                 or similar records are grouped into classes, or
                 clusters of items in such a way that all items within a
                 cluster are jointly retrievable. Clustered files are
                 easily adapted to broad and narrow search strategies,
                 and simple file updating methods are available. An
                 inexpensive file clustering method applicable to large
                 files is given together with appropriate file search
                 methods. An abstract model is then introduced to
                 predict the retrieval effectiveness of various search
                 methods in a clustered file environment. Experimental
                 evidence is included to test the versatility of the
                 model and to demonstrate the role of various parameters
                 in the cluster search process.",
  acknowledgement = ack-nhfb,
  annote =       "Automatic classification for information retrieval",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "automatic classification; cluster searching; clustered
                 files; data processing; fast classification; file
                 organization; probabilistic models",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Clustering}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Search
                 process}",
}

@Article{Banerjee:1978:CCD,
  author =       "Jayanta Banerjee and Richard I. Baum and David K.
                 Hsiao",
  title =        "Concepts and Capabilities of a Database Computer",
  journal =      j-TODS,
  volume =       "3",
  number =       "4",
  pages =        "347--384",
  month =        dec,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-4/p347-banerjee/p347-banerjee.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-4/p347-banerjee/",
  abstract =     "The concepts and capabilities of a database computer
                 (DBC) are given in this paper. The proposed design
                 overcomes many of the traditional problems of database
                 system software and is one of the first to describe a
                 complete data-secure computer capable of handling large
                 databases.\par

                 This paper begins by characterizing the major problems
                 facing today's database system designers. These
                 problems are intrinsically related to the nature of
                 conventional hardware and can only be solved by
                 introducing new architectural concepts. Several such
                 concepts are brought to bear in the later sections of
                 this paper. These architectural principles have a major
                 impact upon the design of the system and so they are
                 discussed in some detail. A key aspect of these
                 principles is that they can be implemented with
                 near-term technology. The rest of the paper is devoted
                 to the functional characteristics and the theory of
                 operation of the DBC. The theory of operation is based
                 on a series of abstract models of the components and
                 data structures employed by the DBC. These models are
                 used to illustrate how the DBC performs access
                 operations, manages data structures and security
                 specifications, and enforces security requirements.
                 Short Algol-like algorithms are used to show how these
                 operations are carried out. This part of the paper
                 concludes with a high-level description of the DBC
                 organization. The actual details of the DBC hardware
                 are quite involved and so their presentation is not the
                 subject of this paper.\par

                 A sample database is included in the Appendix to
                 illustrate the working of the security and clustering
                 mechanisms of the DBC.",
  acknowledgement = ack-nhfb,
  annote-1 =     "The concepts of a database computer (DBC) are given.
                 The theory of operation is based on abstract models.
                 The DBC performs access operations, manages data
                 structures and security specifications.",
  annote-2 =     "The correct author order (from the running heads and
                 table of contents) is Banerjee, Baum, Hsiao: the
                 article cover page has Banerjee, Hsiao, Baum, because
                 the first two share a common address.",
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "clustering; content-addressable memory; data base
                 systems; database computers; hardware support machine
                 TODS, computer architecture; keywords; mass memory;
                 performance; security; structure memory",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Clustering}",
}

@Article{Bradley:1978:EOC,
  author =       "J. Bradley",
  title =        "An Extended Owner-Coupled Set Data Model and Predicate
                 Calculus for Database Management",
  journal =      j-TODS,
  volume =       "3",
  number =       "4",
  pages =        "385--416",
  month =        dec,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-4/p385-bradley/p385-bradley.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-4/p385-bradley/",
  abstract =     "A data model is presented, based on the extension of
                 the concept of a DBTG owner-coupled set to permit {\em
                 static\/} and {\em dynamic\/} sets and a new kind of
                 set referred to as a {\em virtual\/} set. The notion of
                 {\em connection fields\/} is introduced, and it is
                 shown how connection fields may be used to construct
                 derived information bearing set names, and hence permit
                 the specification of (dynamic) sets which are not
                 predeclared in a schema. Virtual sets are shown to
                 reflect the functional dependencies which can exist
                 within a file. A technique which permits the data model
                 to be fully described diagrammatically by {\em extended
                 Bachman diagrams\/} is described. A predicate calculus
                 for manipulation of this data model is presented.
                 Expressions written in this calculus are compared with
                 corresponding expressions in a relational predicate
                 calculus, DSL ALPHA. An argument for the relational
                 completeness of the language is given.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "Codasyl DBTG; connection field; data base systems; DSL
                 ALPHA; dynamic set; extended Bachman diagram; extended
                 owner-coupled set data model; extended owner-coupled
                 set predicate calculus; functional dependency;
                 information bearing set name; owner-coupled set; static
                 set; virtual set",
  subject =      "Information Systems --- Database Management (H.2)",
}

@Article{Shneiderman:1978:IHF,
  author =       "Ben Shneiderman",
  title =        "Improving the Human Factors Aspect of Database
                 Interactions",
  journal =      j-TODS,
  volume =       "3",
  number =       "4",
  pages =        "417--439",
  month =        dec,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-4/p417-shneiderman/p417-shneiderman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-4/p417-shneiderman/",
  abstract =     "The widespread dissemination of computer and
                 information systems to nontechnically trained
                 individuals requires a new approach to the design and
                 development of database interfaces. This paper provides
                 the motivational background for controlled
                 psychological experimentation in exploring the
                 person\slash machine interface. Frameworks for the
                 reductionist approach are given, research methods
                 discussed, research issues presented, and a small
                 experiment is offered as an example of what can be
                 accomplished. This experiment is a comparison of
                 natural and artificial language query facilities.
                 Although subjects posed approximately equal numbers of
                 valid queries with either facility, natural language
                 users made significantly more invalid queries which
                 could not be answered from the database that was
                 described.",
  acknowledgement = ack-nhfb,
  classification = "461; 723; 901",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; data models; database systems;
                 experimentation; human engineering; human factors;
                 natural language interfaces; psychology; query
                 languages; systems science and cybernetics --- man
                 machine systems",
  subject =      "Information Systems --- Models and Principles ---
                 User/Machine Systems (H.1.2): {\bf Human factors};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Query languages}; Information Systems --- Database
                 Management --- Systems (H.2.4)",
}

@Article{Comer:1978:DOI,
  author =       "Douglas Comer",
  title =        "The Difficulty of Optimum Index Selection",
  journal =      j-TODS,
  volume =       "3",
  number =       "4",
  pages =        "440--445",
  month =        dec,
  year =         "1978",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1978-3-4/p440-comer/p440-comer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1978-3-4/p440-comer/",
  abstract =     "Given a file on a secondary store in which each record
                 has several attributes, it is usually advantageous to
                 build an index mechanism to decrease the cost of
                 conducting transactions to the file. The problem of
                 selecting attributes over which to index has been
                 studied in the context of various storage structures
                 and access assumptions. One algorithm to make an
                 optimum index selection requires $2^k$ steps in the
                 worst case, where $k$ is the number of attributes in
                 the file. We examine the question of whether a more
                 efficient algorithm might exist and show that even
                 under a simple cost criterion the problem is
                 computationally difficult in a precise sense. Our
                 results extend directly to other related problems where
                 the cost of the index depends on fixed values which are
                 assigned to each attribute. Some practical implications
                 are discussed.",
  acknowledgement = ack-nhfb,
  annote =       "Theorem: optimum index selection problem OISP is
                 NP-complete for files of degree $ d \ge 2 $.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "attribute selection; complexity; index selection;
                 physical database design; secondary index",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Content Analysis and Indexing (H.3.1):
                 {\bf Indexing methods}",
}

@Article{Babb:1979:IRD,
  author =       "E. Babb",
  title =        "Implementing a Relational Database by Means of
                 Specialized Hardware",
  journal =      j-TODS,
  volume =       "4",
  number =       "1",
  pages =        "1--29",
  month =        mar,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-1/p1-babb/p1-babb.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-1/p1-babb/",
  abstract =     "New hardware is described which allows the rapid
                 execution of queries demanding the joining of
                 physically stored relations. The main feature of the
                 hardware is a special store which can rapidly remember
                 or recall data. This data might be pointers from one
                 file to another, in which case the memory helps with
                 queries on joins of files. Alternatively, the memory
                 can help remove redundant data during projections,
                 giving a considerable speed advantage over conventional
                 hardware.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "bit array; CAFS; content addressing; database;
                 hardware support machine bit vector filter
                 probabilistic semi-join TODS, data base systems;
                 hashing; information retrieval; join; projection;
                 relational model; selection; special hardware",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Beeri:1979:CPR,
  author =       "Catriel Beeri and Philip A. Bernstein",
  title =        "Computational Problems Related to the Design of Normal
                 Form Relational Schemas",
  journal =      j-TODS,
  volume =       "4",
  number =       "1",
  pages =        "30--59",
  month =        mar,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Compiler/prog.lang.theory.bib;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: errata in ACM Transactions on
                 Database Systems, Vol. 4 No. 3, Sep. 1979, p. 396.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-1/p30-beeri/p30-beeri.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-1/p30-beeri/",
  abstract =     "Problems related to functional dependencies and the
                 algorithmic design of relational schemas are examined.
                 Specifically, the following results are presented: (1)
                 a tree model of derivations of functional dependencies
                 from other functional dependencies; (2) a linear-time
                 algorithm to test if a functional dependency is in the
                 closure of a set of functional dependencies; (3) a
                 quadratic-time implementation of Bernstein's third
                 normal form schema synthesis algorithm.
                 \par

                 Furthermore, it is shown that most interesting
                 algorithmic questions about Boyce-Codd normal form and
                 keys are {\em NP\/}-complete and are therefore probably
                 not amenable to fast algorithmic solutions.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Relational databases}",
}

@Article{Lockemann:1979:DAD,
  author =       "Peter C. Lockemann and Heinrich C. Mayr and Wolfgang
                 H. Weil and Wolfgang H. Wohlleber",
  title =        "Data Abstractions for Database Systems",
  journal =      j-TODS,
  volume =       "4",
  number =       "1",
  pages =        "60--75",
  month =        mar,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-1/p60-lockemann/p60-lockemann.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-1/p60-lockemann/",
  abstract =     "Data abstractions were originally conceived as a
                 specification tool in programming. They also appear to
                 be useful for exploring and explaining the capabilities
                 and shortcomings of the data definition and
                 manipulation facilities of present-day database
                 systems. Moreover they may lead to new approaches to
                 the design of these facilities. In the first section
                 the paper introduces an axiomatic method for specifying
                 data abstractions and, on that basis, gives precise
                 meaning to familiar notions such as data model, data
                 type, and database schema. In a second step the various
                 possibilities for specifying data types within a given
                 data model are examined and illustrated. It is shown
                 that data types prescribe the individual operations
                 that are allowed within a database. Finally, some
                 additions to the method are discussed which permit the
                 formulation of interrelationships between arbitrary
                 operations.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "abstract data type; data abstraction; data base
                 systems; data definition language; data manipulation
                 language; data model; data structure; data type;
                 database consistency; database design; database schema;
                 integrity constraints; specification",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Languages (H.2.3): {\bf Data
                 manipulation languages (DML)}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Schema and
                 subschema}",
}

@Article{Denning:1979:TTS,
  author =       "Dorothy E. Denning and Peter J. Denning and Mayer D.
                 Schwartz",
  title =        "The Tracker: a Threat to Statistical Database
                 Security",
  journal =      j-TODS,
  volume =       "4",
  number =       "1",
  pages =        "76--96",
  month =        mar,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-1/p76-denning/p76-denning.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-1/p76-denning/",
  abstract =     "The query programs of certain databases report raw
                 statistics for query sets, which are groups of records
                 specified implicitly by a characteristic formula. The
                 raw statistics include query set size and sums of
                 powers of values in the query set. Many users and
                 designers believe that the individual records will
                 remain confidential as long as query programs refuse to
                 report the statistics of query sets which are too
                 small. It is shown that the compromise of small query
                 sets can in fact almost always be accomplished with the
                 help of characteristic formulas called trackers. J.
                 Schl{\"o}rer's individual tracker is reviewed; it is
                 derived from known characteristics of a given
                 individual and permits deducing additional
                 characteristics he may have. The general tracker is
                 introduced: It permits calculating statistics for
                 arbitrary query sets, without requiring preknowledge of
                 anything in the database. General trackers always exist
                 if there are enough distinguishable classes of
                 individuals in the database, in which case the trackers
                 have a simple form. Almost all databases have a general
                 tracker, and general trackers are almost always easy to
                 find. Security is not guaranteed by the lack of a
                 general tracker.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "confidentiality; data base systems; data processing;
                 data security; database security; secure query
                 functions; statistical database; tracker",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0): {\bf Security, integrity, and
                 protection**}; Information Systems --- Database
                 Management --- Database Applications (H.2.8): {\bf
                 Statistical databases}",
}

@Article{Dobkin:1979:SDP,
  author =       "David Dobkin and Anita K. Jones and Richard J.
                 Lipton",
  title =        "Secure Databases: Protection Against User Influence",
  journal =      j-TODS,
  volume =       "4",
  number =       "1",
  pages =        "97--106",
  month =        mar,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-1/p97-dobkin/p97-dobkin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-1/p97-dobkin/",
  abstract =     "Users may be able to compromise databases by asking a
                 series of questions and then inferring new information
                 from the answers. The complexity of protecting a
                 database against this technique is discussed here.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compromise; data base systems; database; inference;
                 information flow; protection; security; Security TODS,
                 data processing; statistical query",
  subject =      "Information Systems --- Database Management ---
                 Database Administration (H.2.7): {\bf Security,
                 integrity, and protection}",
}

@Article{Kent:1979:LRB,
  author =       "William Kent",
  title =        "Limitations of Record-Based Information Models",
  journal =      j-TODS,
  volume =       "4",
  number =       "1",
  pages =        "107--131",
  month =        mar,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-1/p107-kent/p107-kent.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-1/p107-kent/",
  abstract =     "Record structures are generally efficient, familiar,
                 and easy to use for most current data processing
                 applications. But they are not complete in their
                 ability to represent information, nor are they fully
                 self-describing.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "conceptual model; data model; entities; first normal
                 form; information model; normalization; records;
                 relationships; semantic model",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Normal forms}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Relational databases}",
}

@Article{Yao:1979:OQE,
  author =       "S. Bing Yao",
  title =        "Optimization of Query Evaluation Algorithms",
  journal =      j-TODS,
  volume =       "4",
  number =       "2",
  pages =        "133--155",
  month =        jun,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-2/p133-yao/p133-yao.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-2/p133-yao/",
  abstract =     "A model of database storage and access is presented.
                 The model represents many evaluation algorithms as
                 special cases, and helps to break a complex algorithm
                 into simple access operations. Generalized access cost
                 equations associated with the model are developed and
                 analyzed. Optimization of these cost equations yields
                 an optimal access algorithm which can be synthesized by
                 a query subsystem whose design is based on the modular
                 access operations.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; data base systems;
                 data manipulation language; database optimization;
                 inverted file; query language; query languages; query
                 optimization; relational data model",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf Data
                 manipulation languages (DML)}",
}

@Article{Schwartz:1979:LQS,
  author =       "M. D. Schwartz and D. E. Denning and P. J. Denning",
  title =        "Linear Queries in Statistical Databases",
  journal =      j-TODS,
  volume =       "4",
  number =       "2",
  pages =        "156--167",
  month =        jun,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-2/p156-schwartz/p156-schwartz.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-2/p156-schwartz/",
  abstract =     "A database is compromised if a user can determine the
                 data elements associated with keys which he did not
                 know previously. If it is possible, compromise can be
                 achieved by posing a finite set of queries over sets of
                 data elements and employing initial information to
                 solve the resulting system of equations. Assuming the
                 allowable queries are linear, that is, weighted sums of
                 data elements, we show how compromise can be achieved
                 and we characterize the maximal initial information
                 permitted of a user in a secure system. When compromise
                 is possible, the initial information and the number of
                 queries required to achieve it is surprisingly small.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "confidentiality; data base systems; data processing
                 --- security of data; data security; database security;
                 inference; linear query; secure query functions;
                 statistical database",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Query processing};
                 Information Systems --- Database Management --- General
                 (H.2.0): {\bf Security, integrity, and protection**}",
}

@Article{Aho:1979:OPM,
  author =       "Alfred V. Aho and Jeffrey D. Ullman",
  title =        "Optimal Partial-Match Retrieval When Fields are
                 Independently Specified",
  journal =      j-TODS,
  volume =       "4",
  number =       "2",
  pages =        "168--179",
  month =        jun,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Graphics/siggraph/79.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-2/p168-aho/p168-aho.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-2/p168-aho/",
  abstract =     "This paper considers the design of a system to answer
                 partial-match queries from a file containing a
                 collection of records, each record consisting of a
                 sequence of fields. A partial-match query is a
                 specification of values for zero or more fields of a
                 record, and the answer to a query is a listing of all
                 records in the file whose fields match the specified
                 values.\par

                 A design is considered in which the file is stored in a
                 set of bins. A formula is derived for the optimal
                 number of bits in a bin address to assign to each
                 field, assuming the probability that a given field is
                 specified in a query is independent of what other
                 fields are specified. Implications of the optimality
                 criterion on the size of bins are also discussed.",
  acknowledgement = ack-nhfb,
  classification = "723; 901",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative searching; data processing --- file
                 organization; file organization; hashing; information
                 retrieval; information science; partial-match
                 retrieval; searching",
  oldlabel =     "geom-2",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Retrieval models}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Search process};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Thomas:1979:MCA,
  author =       "Robert H. Thomas",
  title =        "A Majority Consensus Approach to Concurrency Control
                 for Multiple Copy Databases",
  journal =      j-TODS,
  volume =       "4",
  number =       "2",
  pages =        "180--209",
  month =        jun,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib;
                 Distributed/fault.tolerant.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-2/p180-thomas/p180-thomas.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-2/p180-thomas/",
  abstract =     "A ``majority consensus'' algorithm which represents a
                 new solution to the update synchronization problem for
                 multiple copy databases is presented. The algorithm
                 embodies distributed control and can function
                 effectively in the presence of communication and
                 database site outages. The correctness of the algorithm
                 is demonstrated and the cost of using it is analyzed.
                 Several examples that illustrate aspects of the
                 algorithm operation are included in the Appendix.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "clock synchronization; computer networks; concurrency
                 control; data base systems; distributed computation;
                 distributed control; distributed databases;
                 multiprocess systems; update synchronization",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Information Storage and Retrieval --- Systems and
                 Software (H.3.4): {\bf Distributed systems}",
}

@Article{Ries:1979:LGR,
  author =       "Daniel R. Ries and Michael R. Stonebraker",
  title =        "Locking Granularity Revisited",
  journal =      j-TODS,
  volume =       "4",
  number =       "2",
  pages =        "210--227",
  month =        jun,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-2/p210-ries/p210-ries.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-2/p210-ries/",
  abstract =     "Locking granularity refers to the size and hence the
                 number of locks used to ensure the consistency of a
                 database during multiple concurrent updates. In an
                 earlier simulation study we concluded that coarse
                 granularity, such as area or file locking, is to be
                 preferred to fine granularity such as individual page
                 or record locking.\par

                 However, alternate assumptions than those used in the
                 original paper can change that conclusion. First, we
                 modified the assumptions concerning the placement of
                 the locks on the database with respect to the accessing
                 transactions. In the original model the locks were
                 assumed to be well placed. Under worst-case and random
                 placement assumptions when only very small transactions
                 access the database, fine granularity is preferable.
                 \par

                 Second, we extended the simulation to model a lock
                 hierarchy where large transactions use large locks and
                 small transactions use small locks. In this scenario,
                 again under the random and worst-case lock placement
                 assumptions, fine granularity is preferable if all
                 transactions accessing more than 1 percent of the
                 database use large locks.\par

                 Finally, the simulation was extended to model a ``claim
                 as needed'' locking strategy together with the
                 resultant possibility of deadlock. In the original
                 study all locks were claimed in one atomic operation at
                 the beginning of a transaction. The claim as needed
                 strategy does not change the conclusions concerning the
                 desired granularity.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency; database management; locking granularity;
                 locking hierarchies; multiple updates; TODS Ingres,
                 data base systems",
  subject =      "Information Systems --- Database Management ---
                 Database Administration (H.2.7): {\bf Security,
                 integrity, and protection}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Concurrency}; Information Systems --- Database
                 Management (H.2); Information Systems --- Database
                 Management --- Physical Design (H.2.2): {\bf Deadlock
                 avoidance}",
}

@Article{Burkhard:1979:PMH,
  author =       "Walter A. Burkhard",
  title =        "Partial-Match Hash Coding: Benefits of Redundancy",
  journal =      j-TODS,
  volume =       "4",
  number =       "2",
  pages =        "228--239",
  month =        jun,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 Graphics/siggraph/79.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-2/p228-burkhard/p228-burkhard.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-2/p228-burkhard/",
  abstract =     "File designs suitable for retrieval from a file of
                 $k$-field records when queries may be partially
                 specified are examined. Storage redundancy is
                 introduced to obtain improved worst-case and
                 average-case performances. The resulting storage
                 schemes are appropriate for replicated distributed
                 database environments; it is possible to improve the
                 overall average and worst-case behavior for query
                 response as well as provide an environment with very
                 high reliability. Within practical systems it will be
                 possible to improve the query response time performance
                 as well as reliability over comparable systems without
                 replication.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access methods; algorithms; analysis; data base
                 systems; data processing --- file organization; data
                 structures; database systems; replication; searching",
  oldlabel =     "geom-100",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Search process}",
}

@Article{Raghavan:1979:EDR,
  author =       "Vijay V. Raghavan and C. T. Yu",
  title =        "Experiments on the Determination of the Relationships
                 Between Terms",
  journal =      j-TODS,
  volume =       "4",
  number =       "2",
  pages =        "240--260",
  month =        jun,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-2/p240-raghavan/p240-raghavan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-2/p240-raghavan/",
  abstract =     "The retrieval effectiveness of an automatic method
                 that uses relevance judgments for the determination of
                 positive as well as negative relationships between
                 terms is evaluated. The term relationships are
                 incorporated into the retrieval process by using a
                 generalized similarity function that has a term match
                 component, a positive term relationship component, and
                 a negative term relationship component. Two strategies,
                 query partitioning and query clustering, for the
                 evaluation of the effectiveness of the term
                 relationships are investigated. The latter appears to
                 be more attractive from linguistic as well as economic
                 points of view. The positive and the negative
                 relationships are verified to be effective both when
                 used individually, and in combination. The importance
                 attached to the term relationship components relative
                 to that of term match component is found to have a
                 substantial effect on the retrieval performance. The
                 usefulness of discriminant analysis as a technique for
                 determining the relative importance of these components
                 is investigated.",
  acknowledgement = ack-nhfb,
  classification = "723; 901",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "antonym; document retrieval; feedback; information
                 science; pseudoclassification; semantics; statistical
                 discrimination; synonym; term associations; thesaurus",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3);
                 Information Systems --- Information Storage and
                 Retrieval --- Content Analysis and Indexing (H.3.1):
                 {\bf Thesauruses}; Information Systems --- Database
                 Management --- Database Applications (H.2.8): {\bf
                 Statistical databases}",
}

@Article{Lipski:1979:SIC,
  author =       "Witold {Lipski, Jr.}",
  title =        "On Semantic Issues Connected with Incomplete
                 Information Databases",
  journal =      j-TODS,
  volume =       "4",
  number =       "3",
  pages =        "262--296",
  month =        sep,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/nonmono.bib; Compendex database;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-3/p262-lipski/p262-lipski.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-3/p262-lipski/",
  abstract =     "Various approaches to interpreting queries in a
                 database with incomplete information are discussed. A
                 simple model of a database is described, based on
                 attributes which can take values in specified attribute
                 domains. Information incompleteness means that instead
                 of having a single value of an attribute, we have a
                 subset of the attribute domain, which represents our
                 knowledge that the actual value, though unknown, is one
                 of the values in this subset. This extends the idea of
                 Codd's null value, corresponding to the case when this
                 subset is the whole attribute domain. A simple query
                 language to communicate with such a system is described
                 and its various semantics are precisely defined. We
                 emphasize the distinction between two different
                 interpretations of the query language--the external
                 one, which refers the queries directly to the real
                 world modeled in an incomplete way by the system, and
                 the internal one, under which the queries refer to the
                 system's information about this world, rather than to
                 the world itself. Both external and internal
                 interpretations are provided with the corresponding
                 sets of axioms which serve as a basis for equivalent
                 transformations of queries. The technique of equivalent
                 transformations of queries is then extensively
                 exploited for evaluating the interpretation of (i.e.,
                 the response to) a query.",
  acknowledgement = ack-nhfb,
  annote =       "Attributes can take values in specified attribute
                 domains. Instead of a single value of an attribute, we
                 have a subset of the attribute domain, which represents
                 our knowledge that the actual value, though unknown, is
                 one of the values in this subset. This extends the idea
                 of Codd's null value, corresponding to the case when
                 this subset is the whole attribute domain.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database; incomplete information;
                 model logic; null values; query language semantics;
                 relational model",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Relational databases}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf Query
                 languages}",
}

@Article{Aho:1979:TJR,
  author =       "A. V. Aho and C. Beeri and J. D. Ullman",
  title =        "The theory of joins in relational databases",
  journal =      j-TODS,
  volume =       "4",
  number =       "3",
  pages =        "297--314",
  month =        sep,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See corrigendum \cite{Ullman:1983:CTJ}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-3/p297-aho/p297-aho.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-3/p297-aho/",
  abstract =     "Answering queries in a relational database often
                 requires that the natural join of two or more relations
                 be computed. However, the result of a join may not be
                 what one expects. In this paper we give efficient
                 algorithms to determine whether the join of several
                 relations has the intuitively expected value (is {\em
                 lossless\/}) and to determine whether a set of
                 relations has a subset with a lossy join. These
                 algorithms assume that all data dependencies are
                 functional. We then discuss the extension of our
                 techniques to the case where data dependencies are
                 multivalued.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; decomposition of database schemes;
                 functional dependencies; lossless join; multivalued
                 dependencies; natural join; projection of dependencies;
                 relational databases",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Models and Principles ---
                 Systems and Information Theory (H.1.1)",
}

@Article{Fagin:1979:EHF,
  author =       "Ronald Fagin and J{\"u}rg Nievergelt and Nicholas
                 Pippenger and H. Raymond Strong",
  key =          "Fagin et al.",
  title =        "Extendible Hashing --- a Fast Access Method for
                 Dynamic Files",
  journal =      j-TODS,
  volume =       "4",
  number =       "3",
  pages =        "315--344",
  month =        sep,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/bin-packing.bib; Misc/is.bib",
  note =         "Also published in/as: IBM Research Report RJ2305, Jul.
                 1978. See \cite{Regnier:1985:AGF}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-3/p315-fagin/p315-fagin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-3/p315-fagin/",
  abstract =     "Extendible hashing is a new access technique, in which
                 the user is guaranteed no more than two page faults to
                 locate the data associated with a given unique
                 identifier, or key. Unlike conventional hashing,
                 extendible hashing has a dynamic structure that grows
                 and shrinks gracefully as the database grows and
                 shrinks. This approach simultaneously solves the
                 problem of making hash tables that are extendible and
                 of making radix search trees that are balanced. We
                 study, by analysis and simulation, the performance of
                 extendible hashing. The results indicate that
                 extendible hashing provides an attractive alternative
                 to other access methods, such as balanced trees.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  journalabr =   "ACM Trans Database Syst",
  keywords =     "access method; B-tree; data processing; directory;
                 extendible hashing; external hashing; file
                 organization; hashing; index; radix search; searching;
                 trie",
  remark =       "The user is guaranteed no more than two page faults to
                 locate the data associated with a given unique
                 identifier, or key. Extendible hashing has a dynamic
                 structure that grows and shrinks as the database grows
                 and shrinks.",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Search process}",
}

@Article{Lam:1979:PSH,
  author =       "Chat Yu Lam and Stuart E. Madnick",
  title =        "Properties of Storage Hierarchy Systems with Multiple
                 Page Sizes and Redundant Data",
  journal =      j-TODS,
  volume =       "4",
  number =       "3",
  pages =        "345--367",
  month =        sep,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/citations/journals/tods/1979-4-3/p345-lam/",
  abstract =     "The need for high performance, highly reliable storage
                 for very large on-line databases, coupled with rapid
                 advances in storage device technology, has made the
                 study of generalized storage hierarchies an important
                 area of research.\par

                 This paper analyzes properties of a data storage
                 hierarchy system specifically designed for handling
                 very large on-line databases. To attain high
                 performance and high reliability, the data storage
                 hierarchy makes use of multiple page sizes in different
                 storage levels and maintains multiple copies of the
                 same information across the storage levels. Such a
                 storage hierarchy system is currently being designed as
                 part of the INFOPLEX database computer project.
                 Previous studies of storage hierarchies have primarily
                 focused on virtual memories for program storage and
                 hierarchies with a single page size across all storage
                 levels and/or a single copy of information in the
                 hierarchy.\par

                 In the INFOPLEX design, extensions to the least
                 recently used (LRU) algorithm are used to manage the
                 storage levels. The read-through technique is used to
                 initially load a referenced page of the appropriate
                 size into all storage levels above the one in which the
                 page is found. Since each storage level is viewed as an
                 extension of the immediate higher level, an overflow
                 page from level $i$ is always placed in level $ i + 1
                 $. Important properties of these algorithms are
                 derived. It is shown that depending on the types of
                 algorithms used and the relative sizes of the storage
                 levels, it is not always possible to guarantee that the
                 contents of a given storage level $i$ is always a
                 superset of the contents of its immediate higher
                 storage level $ i - 1 $. The necessary and sufficient
                 conditions for this property to hold are identified and
                 proved. Furthermore, it is possible that increasing the
                 size of intermediate storage levels may actually
                 increase the number of references to lower storage
                 levels, resulting in reduced performance. Conditions
                 necessary to avoid such an anomaly are also identified
                 and proved.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; data storage hierarchy; database
                 computer; inclusion properties; modeling; performance
                 and reliability analysis; storage management
                 algorithms; very large databases",
  subject =      "Information Systems --- Database Management (H.2);
                 Software --- Operating Systems --- Storage Management
                 (D.4.2): {\bf Storage hierarchies}",
}

@Article{Buneman:1979:EMR,
  author =       "O. Peter Buneman and Eric K. Clemons",
  title =        "Efficiently Monitoring Relational Databases",
  journal =      j-TODS,
  volume =       "4",
  number =       "3",
  pages =        "368--382",
  month =        sep,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: Working paper, 76-10-08, Dep.
                 Decision Sciences, The Wharton School, Un. Penn, PA,
                 Jun. 1977.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-3/p368-buneman/p368-buneman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-3/p368-buneman/",
  abstract =     "An alerter is a program which monitors a database and
                 reports to some user or program when a specified
                 condition occurs. It may be that the condition is a
                 complicated expression involving several entities in
                 the database; in this case the evaluation of the
                 expression may be computationally expensive. A scheme
                 is presented in which alerters may be placed on a
                 complex query involving a relational database, and a
                 method is demonstrated for reducing the amount of
                 computation involved in checking whether an alerter
                 should be triggered.",
  acknowledgement = ack-nhfb,
  annote =       "An alerter monitors a database and reports when a
                 specific condition occurs. Alerters may be placed on a
                 query, a method is demonstrated for reducing the amount
                 of computation involved in checking whether an alerter
                 should be triggered. Recomputation of derived data with
                 pruning, viz. identity connection.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "alerters; data base systems; exception reporting;
                 integrity constraints; programming techniques;
                 relational databases",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Comer:1979:HTI,
  author =       "Douglas Comer",
  title =        "Heuristics For Trie Index Minimization",
  journal =      j-TODS,
  volume =       "4",
  number =       "3",
  pages =        "383--395",
  month =        sep,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-3/p383-comer/p383-comer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-3/p383-comer/",
  abstract =     "A trie is a digital search tree in which leaves
                 correspond to records in a file. Searching proceeds
                 from the root to a leaf, where the edge taken at each
                 node depends on the value of an attribute in the query.
                 Trie implementations have the advantage of being fast,
                 but the disadvantage of achieving that speed at great
                 expense in storage space. Of primary concern in making
                 a trie practical, therefore, is the problem of
                 minimizing storage requirements. One method for
                 reducing the space required is to reorder attribute
                 testing. Unfortunately, the problem of finding an
                 ordering which guarantees a minimum-size trie is
                 NP-complete. In this paper we investigate several
                 heuristics for reordering attributes, and derive bounds
                 on the sizes of the worst tries produced by them in
                 terms of the underlying file. Although the analysis is
                 presented for a binary file, extensions to files of
                 higher degree are shown.\par

                 Another alternative for reducing the space required by
                 a trie is an implementation, called an $ \Omega $-trie,
                 in which the order of attribute testing is contained in
                 the trie itself. We show that for most applications, $
                 \Omega $-tries are smaller than other implementations
                 of tries, even when heuristics for improving storage
                 requirements are employed.",
  acknowledgement = ack-nhfb,
  annote =       "Of primary concern in making a trie practical is the
                 problem of minimizing storage requirements. One method
                 for reducing the space is to reorder attribute testing,
                 but finding a minimum-size ordering is NP-complete.
                 Another alternative is an $ \Omega $-trie,
                 in which the order of attribute testing is contained in
                 the trie itself. $ \Omega $-tries are smaller than
                 other implementations of tries.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; doubly chained tree; index; trie;
                 trie minimization",
  subject =      "Data --- Data Structures (E.1); Information Systems
                 --- Database Management (H.2)",
}

@Article{Codd:1979:EDR,
  author =       "E. F. Codd",
  title =        "Extending the Database Relational Model to Capture
                 More Meaning",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "397--434",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/nonmono.bib; Compendex database;
                 Compiler/prog.lang.theory.bib; Database/Graefe.bib;
                 Database/Wiederhold.bib; Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  note =         "Reprinted in
                 \cite[pp.~457--475]{Stonebraker:1988:RDS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p397-codd/p397-codd.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p397-codd/",
  abstract =     "During the last three or four years several
                 investigators have been exploring ``semantic models''
                 for formatted databases. The intent is to capture (in a
                 more or less formal way) more of the meaning of the
                 data so that database design can become more systematic
                 and the database system itself can behave more
                 intelligently. Two major thrusts are clear.\par

                 (1) the search for meaningful units that are as small
                 as possible-- {\em atomic semantics\/};\par

                 (2) the search for meaningful units that are larger
                 than the usual $n$-ary relation-- {\em molecular
                 semantics}.\par

                 In this paper we propose extensions to the relational
                 model to support certain atomic and molecular
                 semantics. These extensions represent a synthesis of
                 many ideas from the published work in semantic modeling
                 plus the introduction of new rules for insertion,
                 update, and deletion, as well as new algebraic
                 operators.",
  acknowledgement = ack-nhfb,
  acmcrnumber =  "8905-0330",
  annote =       "``Semantic models'' for formatted databases, to
                 capture in a more or less formal way more of the
                 meaning of the data. Two major thrusts: atomic and
                 molecular semantics. Extensions to the relational model
                 (RM/T). New rules for insertion, update, and deletion,
                 as well as new algebraic operators (Theta-select, outer
                 join,\ldots{}.).",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "conceptual model; conceptual schema; data base
                 systems; data model; data semantics; database; database
                 schema; entity model; knowledge base; knowledge
                 representation; relation; relational database;
                 relational model; relational schema; semantic model",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems
                 --- Database Management --- Logical Design (H.2.1):
                 {\bf Schema and subschema}",
}

@Article{Aho:1979:EOC,
  author =       "A. V. Aho and Y. Sagiv and J. D. Ullman",
  title =        "Efficient Optimization of a Class of Relational
                 Expressions",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "435--454",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p435-aho/p435-aho.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p435-aho/",
  abstract =     "The design of several database query languages has
                 been influenced by Codd's relational algebra. This
                 paper discusses the difficulty of optimizing queries
                 based on the relational algebra operations select,
                 project, and join. A matrix, called a tableau, is
                 proposed as a useful device for representing the value
                 of a query, and optimization of queries is couched in
                 terms of finding a minimal tableau equivalent to a
                 given one. Functional dependencies can be used to imply
                 additional equivalences among tableaux. Although the
                 optimization problem is NP-complete, a polynomial time
                 algorithm exists to optimize tableaux that correspond
                 to an important subclass of queries.",
  acknowledgement = ack-nhfb,
  annote =       "Optimizing queries based on select, project, and
                 join.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems, TODS tableaux optimization;
                 equivalence of queries; NP-completeness; query
                 optimization; relational algebra; relational database;
                 tableaux",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}",
}

@Article{Maier:1979:TID,
  author =       "David Maier and Alberto O. Mendelzon and Yehoshua
                 Sagiv",
  title =        "Testing Implications of Data Dependencies",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "455--469",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p455-maier/p455-maier.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p455-maier/",
  abstract =     "Presented is a computation method --- the {\em
                 chase\/} --- for testing implication of data
                 dependencies by a set of data dependencies. The chase
                 operates on tableaux similar to those of Aho, Sagiv,
                 and Ullman. The chase includes previous tableau
                 computation methods as special cases. By interpreting
                 tableaux alternately as mappings or as templates for
                 relations, it is possible to test implication of join
                 dependencies (including multivalued dependencies) and
                 functional dependencies by a set of dependencies.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "chase; data base systems; data dependencies;
                 functional dependencies; join dependencies; multivalued
                 dependencies; relational databases; tableaux",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Housel:1979:PTI,
  author =       "Barron C. Housel",
  title =        "Pipelining: a Technique for Implementing Data
                 Restructurers",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "470--492",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p470-housel/p470-housel.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p470-housel/",
  abstract =     "In the past several years much attention has been
                 given to the problem of data translation. The focus has
                 been mainly on methodologies and specification
                 languages for accomplishing this task. Recently,
                 several prototype systems have emerged, and now the
                 issues of implementation and performance must be
                 addressed. In general, a data restructuring
                 specification may contain multiple source and target
                 files. This specification can be viewed as a ``process
                 graph'' which is a network of restructuring operations
                 subject to precedence constraints. One technique used
                 to achieve good performance is that of pipelining data
                 in the process graph.\par

                 In this paper we address a number of issues pertinent
                 to a pipelining architecture. Specifically, we give
                 algorithms for resolving deadlock situations which can
                 arise, and partitioning the process graph to achieve an
                 optimal schedule for executing the restructuring steps.
                 In addition, we discuss how pipelining has influenced
                 the design of the restructuring operations and the file
                 structures used in an actual system.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; data translation; database
                 conversion; deadlock; pipelining; process scheduling",
  subject =      "Information Systems --- Database Management ---
                 Heterogeneous Databases (H.2.5): {\bf Data
                 translation**}; Information Systems --- Database
                 Management --- Physical Design (H.2.2): {\bf Deadlock
                 avoidance}",
}

@Article{Shopiro:1979:TPL,
  author =       "Jonathan E. Shopiro",
  title =        "{Theseus} --- {A} Programming Language for Relational
                 Databases",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "493--517",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p493-shopiro/p493-shopiro.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p493-shopiro/",
  abstract =     "Theseus, a very high-level programming language
                 extending EUCLID, is described. Data objects in Theseus
                 include relations and a-sets, a generalization of
                 records. The primary design goals of Theseus are to
                 facilitate the writing of well-structured programs for
                 database applications and to serve as a vehicle for
                 research in automatic program optimization.",
  acknowledgement = ack-nhfb,
  annote =       "Extending EUCLID. Data objects in Theseus include
                 relations and a-sets",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compiler organization; computer programming languages;
                 data base systems; relational database languages; very
                 high-level languages",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Database (persistent)
                 programming languages}",
}

@Article{Yamamoto:1979:DBM,
  author =       "Sumiyasu Yamamoto and Shinsei Tazawa and Kazuhiko
                 Ushio and Hideto Ikeda",
  title =        "Design of a Balanced Multiple-Valued File-Organization
                 Scheme with the Least Redundancy",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "518--530",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p518-yamamoto/p518-yamamoto.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p518-yamamoto/",
  abstract =     "A new balanced file-organization scheme of order two
                 for multiple-valued records is presented. This scheme
                 is called HUBMFS 2 (Hiroshima University Balanced
                 Multiple-valued File-organization Scheme of order two).
                 It is assumed that records are characterized by $m$
                 attributes having $n$ possible values each, and the
                 query set consists of queries which specify values of
                 two attributes. It is shown that the redundancy of the
                 bucket (the probability of storing a record in the
                 bucket) is minimized if and only if the structure of
                 the bucket is a partite-claw. A necessary and
                 sufficient condition for the existence of an HUBMFS 2,
                 which is composed exclusively of partite-claw buckets,
                 is given. A construction algorithm is also given. The
                 proposed HUBMFS 2 is superior to existing BMFS 2
                 (Balanced Multiple-valued File-organization Schemes of
                 order two) in that it has the least redundancy among
                 all possible BMFS 2's having the same parameters and
                 that it can be constructed for a less restrictive set
                 of parameters.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "balanced filing scheme; bucket; claw; data processing;
                 file organization; graph decomposition; information
                 retrieval; information storage; inverted file;
                 multipartite graph; multiple-valued attributes;
                 redundancy; secondary index",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Schema and
                 subschema}; Information Systems --- Information Storage
                 and Retrieval --- Information Storage (H.3.2)",
}

@Article{Batory:1979:STF,
  author =       "Don S. Batory",
  title =        "On Searching Transposed Files",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "531--544",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p531-batory/p531-batory.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p531-batory/",
  abstract =     "A transposed file is a collection of nonsequential
                 files called subfiles. Each subfile contains selected
                 attribute data for all records. It is shown that
                 transposed file performance can be enhanced by using a
                 proper strategy to process queries. Analytic cost
                 expressions for processing conjunctive, disjunctive,
                 and batched queries are developed and an effective
                 heuristic for minimizing query processing costs is
                 presented. Formulations of the problem of optimally
                 processing queries for a particular family of
                 transposed files are shown to be NP-complete. Query
                 processing performance comparisons of multilist,
                 inverted, and nonsequential files with transposed files
                 are also considered.",
  acknowledgement = ack-nhfb,
  annote =       "Analytic cost expressions for processing conjunctive,
                 disjunctive, and batch queries are developed and an
                 effective heuristic for minimizing query processing
                 costs is presented.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; file searching; inverted file;
                 multilist; NP-complete; query processing; transposed
                 file",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Search process}",
}

@Article{Bernstein:1979:CPS,
  author =       "Philip A. Bernstein and Marco A. Casanova and Nathan
                 Goodman",
  title =        "Comments on {``Process Synchronization in Database
                 Systems''}",
  journal =      j-TODS,
  volume =       "4",
  number =       "4",
  pages =        "545--546",
  month =        dec,
  year =         "1979",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See \cite{Schlageter:1978:PSD}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1979-4-4/p545-bernstein/p545-bernstein.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1979-4-4/p545-bernstein/",
  acknowledgement = ack-nhfb,
  annote =       "The results of Schlageter are in error.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  subject =      "Information Systems --- Database Management (H.2)",
}

@Article{Rothnie:1980:ISD,
  author =       "James B. {Rothnie, Jr.} and Philip A. Bernstein and S.
                 Fox and N. Goodman and M. Hammer and T. A. Landers and
                 C. Reeve and David W. Shipman and E. Wong",
  title =        "Introduction to a System for Distributed Databases
                 ({SDD-1})",
  journal =      j-TODS,
  volume =       "5",
  number =       "1",
  pages =        "1--17",
  month =        mar,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib;
                 Distributed/fault.tolerant.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-1/p1-rothnie/p1-rothnie.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-1/p1-rothnie/",
  abstract =     "The declining cost of computer hardware and the
                 increasing data processing needs of geographically
                 dispersed organizations have led to substantial
                 interest in distributed data management. SDD-1 is a
                 distributed database management system currently being
                 developed by Computer Corporation of America. Users
                 interact with SDD-1 precisely as if it were a
                 nondistributed database system because SDD-1 handles
                 all issues arising from the distribution of data. These
                 issues include distributed concurrency control,
                 distributed query processing, resiliency to component
                 failure, and distributed directory management. This
                 paper presents an overview of the SDD-1 design and its
                 solutions to the above problems.\par

                 This paper is the first of a series of companion papers
                 on SDD-1 (Bernstein and Shipman [2], Bernstein et al.
                 [4], and Hammer and Shipman [14]).",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; data base systems; database
                 reliability; distributed database system; query
                 processing; relational data model",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Relational databases}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Concurrency}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query
                 processing}",
}

@Article{Bernstein:1980:CCS,
  author =       "Philip A. Bernstein and David W. Shipman and James B.
                 {Rothnie, Jr.}",
  title =        "Concurrency Control in a System for Distributed
                 Databases ({SDD-1})",
  journal =      j-TODS,
  volume =       "5",
  number =       "1",
  pages =        "18--51",
  month =        mar,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 Distributed/fault.tolerant.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-1/p18-bernstein/p18-bernstein.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-1/p18-bernstein/",
  abstract =     "This paper presents the concurrency control strategy
                 of SDD-1. SDD-1, a System for Distributed Databases, is
                 a prototype distributed database system being developed
                 by Computer Corporation of America. In SDD-1, portions
                 of data distributed throughout a network may be
                 replicated at multiple sites. The SDD-1 concurrency
                 control guarantees database consistency in the face of
                 such distribution and replication.\par

                 This paper is one of a series of companion papers on
                 SDD-1 [4, 10, 12, 21].",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; conflict graph; data base
                 systems; distributed database system; serializability;
                 synchronization; timestamps",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Distributed databases}",
}

@Article{Bernstein:1980:CCCb,
  author =       "Philip A. Bernstein and David W. Shipman",
  title =        "Correctness of Concurrency Control Mechanisms in a
                 System for Distributed Databases ({SDD-1})",
  journal =      j-TODS,
  volume =       "5",
  number =       "1",
  pages =        "52--68",
  month =        mar,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-1/p52-bernstein/p52-bernstein.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-1/p52-bernstein/",
  abstract =     "This paper presents a formal analysis of the
                 concurrency control strategy of SDD-1. SDD-1, a System
                 for Distributed Databases, is a prototype distributed
                 database system being developed by Computer Corporation
                 of America. In SDD-1, portions of data distributed
                 throughout a network may be replicated at multiple
                 sites. The SDD-1 concurrency control guarantees
                 database consistency in the face of such distribution
                 and replication.\par

                 This paper is one of a series of companion papers on
                 SDD-1 [2, 8].",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "conflict graph; correctness of concurrency control;
                 data base systems; distributed database system;
                 serializability theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}",
}

@Article{Gopalakrishna:1980:PEA,
  author =       "V. Gopalakrishna and C. E. {Veni Madhavan}",
  title =        "Performance Evaluation of Attribute-Based Tree
                 Organization",
  journal =      j-TODS,
  volume =       "5",
  number =       "1",
  pages =        "69--87",
  month =        mar,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-1/p69-gopalakrishna/p69-gopalakrishna.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-1/p69-gopalakrishna/",
  abstract =     "A modified version of the multiple attribute tree
                 (MAT) database organization, which uses a compact
                 directory, is discussed. An efficient algorithm to
                 process the directory for carrying out the node
                 searches is presented. Statistical procedures are
                 developed to estimate the number of nodes searched and
                 the number of data blocks retrieved for most general
                 and complex queries. The performance of inverted file
                 and modified MAT organizations are compared using six
                 real-life databases and four types of query
                 complexities. Careful tradeoffs are established in
                 terms of storage and access times for directory and
                 data, query complexities, and database
                 characteristics.",
  acknowledgement = ack-nhfb,
  annote =       "A version of the multiple attribute tree (MAT)
                 database organization. Statistical procedures are
                 developed to estimate the number of nodes searched and
                 the number of data blocks retrieved. The performance of
                 inverted file and modified MAT organizations are
                 compared.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access time; average retrieval time per query; data
                 base systems; database organization; database
                 performance; directory search time; modified multiple
                 attribute tree; query complexity; secondary index
                 organization",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3)",
}

@Article{Denning:1980:FPF,
  author =       "Dorothy E. Denning and Jan Schl{\"o}rer",
  title =        "Fast Procedure for Finding a Tracker in a Statistical
                 Database",
  journal =      j-TODS,
  volume =       "5",
  number =       "1",
  pages =        "88--102",
  month =        mar,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-1/p88-denning/p88-denning.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-1/p88-denning/",
  abstract =     "To avoid trivial compromises, most on-line statistical
                 databases refuse to answer queries for statistics about
                 small subgroups. Previous research discovered a
                 powerful snooping tool, the tracker, with which the
                 answers to these unanswerable queries are easily
                 calculated. However, the extent of this threat was not
                 clear, for no one had shown that finding a tracker is
                 guaranteed to be easy.\par

                 This paper gives a simple algorithm for finding a
                 tracker when the maximum number of identical records is
                 not too large. The number of queries required to find a
                 tracker is at most {$ O(\log_2 S) $} queries, where
                 {$S$} is the number of distinct records possible.
                 Experimental results show that the procedure often
                 finds a tracker with just a few queries. The threat
                 posed by trackers is therefore considerable.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "confidentiality; data base systems; data security;
                 database security; statistical database; tracker",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}; Information Systems --- Database Management
                 --- Database Administration (H.2.7): {\bf Security,
                 integrity, and protection}",
}

@Article{Menasce:1980:LPR,
  author =       "Daniel A. Menasc{\'e} and Gerald J. Popek and Richard
                 R. Muntz",
  title =        "A Locking Protocol for Resource Coordination in
                 Distributed Databases",
  journal =      j-TODS,
  volume =       "5",
  number =       "2",
  pages =        "103--138",
  month =        jun,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 Distributed/fault.tolerant.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-2/p103-menasce/p103-menasce.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-2/p103-menasce/",
  abstract =     "A locking protocol to coordinate access to a
                 distributed database and to maintain system consistency
                 throughout normal and abnormal conditions is presented.
                 The proposed protocol is robust in the face of crashes
                 of any participating site, as well as communication
                 failures. Recovery from any number of failures during
                 normal operation or any of the recovery stages is
                 supported. Recovery is done in such a way that maximum
                 forward progress is achieved by the recovery
                 procedures. Integration of virtually any locking
                 discipline including predicate lock methods is
                 permitted by this protocol. The locking algorithm
                 operates, and operates correctly, when the network is
                 partitioned, either intentionally or by failure of
                 communication lines. Each partition is able to continue
                 with work local to it, and operation merges gracefully
                 when the partitions are reconnected.\par

                 A subroutine of the protocol, that assures reliable
                 communication among sites, is shown to have better
                 performance than two-phase commit methods. For many
                 topologies of interest, the delay introduced by the
                 overall protocol is not a direct function of the size
                 of the network. The communications cost is shown to
                 grow in a relatively slow, linear fashion with the
                 number of sites participating in the transaction. An
                 informal proof of the correctness of the algorithm is
                 also presented in this paper.\par

                 The algorithm has as its core a centralized locking
                 protocol with distributed recovery procedures. A
                 centralized controller with local appendages at each
                 site coordinates all resource control, with requests
                 initiated by application programs at any site. However,
                 no site experiences undue load. Recovery is broken down
                 into three disjoint mechanisms: for single node
                 recovery, merge of partitions, and reconstruction of
                 the centralized controller and tables. The disjointness
                 of the mechanisms contributes to comprehensibility and
                 ease of proof.\par

                 The paper concludes with a proposal for an extension
                 aimed at optimizing operation of the algorithm to adapt
                 to highly skewed distributions of activity. The
                 extension applies nicely to interconnected computer
                 networks.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency; consistency; crash recovery; distributed
                 databases; locking protocol",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Distributed databases}",
}

@Article{Bayer:1980:PRD,
  author =       "R. Bayer and H. Heller and A. Reiser",
  title =        "Parallelism and Recovery in Database Systems",
  journal =      j-TODS,
  volume =       "5",
  number =       "2",
  pages =        "139--156",
  month =        jun,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-2/p139-bayer/p139-bayer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-2/p139-bayer/",
  abstract =     "In this paper a new method to increase parallelism in
                 database systems is described. Use is made of the fact
                 that for recovery reasons, we often have two values for
                 one object in the database--the new one and the old
                 one. Introduced and discussed in detail is a certain
                 scheme by which readers and writers may work
                 simultaneously on the same object. It is proved that
                 transactions executed according to this scheme have the
                 correct effect; i.e., consistency is preserved. Several
                 variations of the basic scheme which are suitable
                 depending on the degree of parallelism required, are
                 described.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency; consistency; data base systems; deadlock;
                 integrity; recovery; synchronization; transaction; two
                 phase locking",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Deadlock avoidance};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Transaction processing}",
}

@Article{Navathe:1980:SAD,
  author =       "Shamkant B. Navathe",
  title =        "Schema Analysis for Database Restructuring",
  journal =      j-TODS,
  volume =       "5",
  number =       "2",
  pages =        "157--184",
  month =        jun,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: Proceedings of the Third
                 Conference on Very Large Databases, Morgan Kaufman
                 pubs. (Los Altos CA), 1977.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-2/p157-navathe/p157-navathe.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-2/p157-navathe/",
  abstract =     "The problem of generalized restructuring of databases
                 has been addressed with two limitations: first, it is
                 assumed that the restructuring user is able to describe
                 the source and target databases in terms of the
                 implicit data model of a particular methodology;
                 second, the restructuring user is faced with the task
                 of judging the scope and applicability of the defined
                 types of restructuring to his database implementation
                 and then of actually specifying his restructuring needs
                 by translating them into the restructuring operations
                 on a foreign data model. A certain amount of analysis
                 of the logical and physical structure of databases must
                 be performed, and the basic ingredients for such an
                 analysis are developed here. The distinction between
                 hierarchical and nonhierarchical data relationships is
                 discussed, and a classification for database schemata
                 is proposed. Examples are given to illustrate how these
                 schemata arise in the conventional hierarchical and
                 network systems. Application of the schema analysis
                 methodology to restructuring specification is also
                 discussed. An example is presented to illustrate the
                 different implications of restructuring three seemingly
                 identical database structures.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; data model; data relationships;
                 data semantics; data structure; database; database
                 design; database management systems; database
                 restructuring; graphical representation of data;
                 schema; stored data",
  subject =      "Information Systems --- Database Management (H.2);
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Schema and subschema}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Relational databases}",
}

@Article{Mylopoulos:1980:LFD,
  author =       "John Mylopoulos and Philip A. Bernstein and Harry K.
                 T. Wong",
  title =        "A Language Facility for Designing Database-Intensive
                 Applications",
  journal =      j-TODS,
  volume =       "5",
  number =       "2",
  pages =        "185--207",
  month =        jun,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib; Object/Nierstrasz.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-2/p185-mylopoulos/p185-mylopoulos.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-2/p185-mylopoulos/",
  abstract =     "TAXIS, a language for the design of interactive
                 information systems (e.g., credit card verification,
                 student-course registration, and airline reservations)
                 is described. TAXIS offers (relational) database
                 management facilities, a means of specifying semantic
                 integrity constraints, and an exception-handling
                 mechanism, integrated into a single language through
                 the concepts of {\em class, property}, and the {\em
                 IS-A\/} (generalization) {\em relationship}. A
                 description of the main constructs of TAXIS is included
                 and their usefulness illustrated with examples.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "abstract data type; applications programming;
                 exception handling; information system; relational data
                 model; semantic network",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems
                 --- Database Management --- Languages (H.2.3)",
}

@Article{Lozinskii:1980:CRR,
  author =       "Eliezer L. Lozinskii",
  title =        "Construction of Relations in Relational Databases",
  journal =      j-TODS,
  volume =       "5",
  number =       "2",
  pages =        "208--224",
  month =        jun,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-2/p208-lozinskii/p208-lozinskii.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-2/p208-lozinskii/",
  abstract =     "Using a nonprocedural language for query formulation
                 requires certain automatization of a query answering
                 process. Given a query for creation of a new relation,
                 the problem is to find an efficient procedure which
                 produces this relation from a given relational
                 database. The author concentrates upon sequences of
                 join operations which losslessly produce a relation
                 required by a query. A new property of such sequences
                 is analyzed which provides a basis for the presented
                 algorithms that construct an efficient join procedure.
                 The algorithms have polynomial complexity. A modified
                 AND\slash OR graph is used for the display of a given
                 set of dependencies and a collection of relations
                 representing a database.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; AND/OR graphs; data base systems; lossless
                 joins; query answering; relational databases",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Stonebraker:1980:RDS,
  author =       "Michael Stonebraker",
  title =        "Retrospection on a Database System",
  journal =      j-TODS,
  volume =       "5",
  number =       "2",
  pages =        "225--240",
  month =        jun,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Reprinted in \cite{Stonebraker:1988:RDS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-2/p225-stonebraker/p225-stonebraker.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-2/p225-stonebraker/",
  abstract =     "This paper describes the implementation history of the
                 INGRES database system. It focuses on mistakes that
                 were made in progress rather than on eventual
                 corrections. Some attention is also given to the role
                 of structured design in a database system
                 implementation and to the problem of supporting
                 nontrivial users. Lastly, miscellaneous impressions of
                 UNIX, the PDP-11, and data models are given.",
  acknowledgement = ack-nhfb,
  annote =       "The implementation history of the INGRES database
                 system. The role of structured design in a database
                 system implementation, impressions of UNIX, the PDP-11,
                 and data models are given.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency; data base systems, history evaluation,
                 Ingres, TODS; integrity; nonprocedural languages;
                 protection; recovery; relational databases",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Relational
                 databases}; Information Systems --- Database Management
                 --- Physical Design (H.2.2): {\bf Recovery and
                 restart}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Concurrency}",
}

@Article{Beeri:1980:MPF,
  author =       "Catriel Beeri",
  title =        "On the Membership Problem for Functional and
                 Multivalued Dependencies in Relational Databases",
  journal =      j-TODS,
  volume =       "5",
  number =       "3",
  pages =        "241--259",
  month =        sep,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-3/p241-beeri/p241-beeri.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-3/p241-beeri/",
  abstract =     "The problem of whether a given dependency in a
                 database relation can be derived from a given set of
                 dependencies is investigated. We show that the problem
                 can be decided in polynomial time when the given set
                 consists of either multivalued dependencies only or of
                 both functional and multivalued dependencies and the
                 given dependency is also either a functional or a
                 multivalued dependency. These results hold when the
                 derivations are restricted not to use the
                 complementation rule.",
  acknowledgement = ack-nhfb,
  annote =       "The problem of whether a given dependency in a
                 database relation can be derived from a given set of
                 dependencies is investigated. We show that the problem
                 can be decided in polynomial time when the given set
                 consists of either multivalued dependencies only or of
                 both functional and multivalued dependencies and the
                 given dependency is also either a functional or a
                 multivalued dependency. These results hold when the
                 derivations are restricted not to use the
                 complementation rule.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; functional dependency; inference
                 rule; membership; multivalued dependency; relations",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Klug:1980:CCR,
  author =       "A. Klug",
  title =        "Calculating Constraints on Relational Expressions",
  journal =      j-TODS,
  volume =       "5",
  number =       "3",
  pages =        "260--290",
  month =        sep,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-3/p260-klug/p260-klug.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-3/p260-klug/",
  abstract =     "This paper deals with the problem of determining which
                 of a certain class of constraints hold on a given
                 relational algebra expression where the base relations
                 come from a given schema. The class of constraints
                 includes functional dependencies, equality of domains,
                 and constancy of domains. The relational algebra
                 consists of projection, selection, restriction, cross
                 product, union, and difference. The problem as given is
                 undecidable, but if set difference is removed from the
                 algebra, there is a solution. Operators specifying a
                 closure function (similar to functional dependency
                 closure on one relation) are defined; these will
                 generate exactly the set of constraints valid on the
                 given relational algebra expression. We prove that the
                 operators are sound and complete.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "completeness; constraints; data base systems;
                 derivation rules; functional dependencies; Views",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Denning:1980:SSD,
  author =       "Dorothy E. Denning",
  title =        "Secure Statistical Databases with Random Sample
                 Queries",
  journal =      j-TODS,
  volume =       "5",
  number =       "3",
  pages =        "291--315",
  month =        sep,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-3/p291-denning/p291-denning.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-3/p291-denning/",
  abstract =     "A new inference control, called random sample queries,
                 is proposed for safeguarding confidential data in
                 on-line statistical databases. The random sample
                 queries control deals directly with the basic principle
                 of compromise by making it impossible for a questioner
                 to control precisely the formation of query sets.
                 Queries for relative frequencies and averages are
                 computed using random samples drawn from the query
                 sets. The sampling strategy permits the release of
                 accurate and timely statistics and can be implemented
                 at very low cost. Analysis shows the relative error in
                 the statistics decreases as the query set size
                 increases; in contrast, the effort required to
                 compromise increases with the query set size due to
                 large absolute errors. Experiments performed on a
                 simulated database support the analysis.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "confidentiality; data base systems; database security;
                 disclosure controls; sampling; statistical database",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Query processing};
                 Information Systems --- Database Management ---
                 Database Administration (H.2.7): {\bf Security,
                 integrity, and protection}",
}

@Article{Beck:1980:SMS,
  author =       "Leland L. Beck",
  title =        "A security mechanism for statistical databases",
  journal =      j-TODS,
  volume =       "5",
  number =       "3",
  pages =        "316--338",
  month =        sep,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-3/p316-beck/p316-beck.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-3/p316-beck/",
  abstract =     "The problem of user inference in statistical databases
                 is discussed and illustrated with several examples. It
                 is assumed that the database allows ``total'',
                 ``average'', ``count'', and ``percentile'' queries; a
                 query may refer to any arbitrary subset of the
                 database. Methods for protecting the security of such a
                 database are considered; it is shown that any scheme
                 which gives ``statistically correct'' answers is
                 vulnerable to penetration. A precise definition of
                 compromisability (in a statistical sense) is given. A
                 general model of user inference is proposed; two
                 special cases of this model appear to contain all
                 previously published strategies for compromising a
                 statistical database. A method for protecting the
                 security of such a statistical database against these
                 types of user inference is presented and discussed. It
                 is shown that the number of queries required to
                 compromise the database can be made arbitrarily large
                 by accepting moderate increases in the variance of
                 responses to queries. A numerical example is presented
                 to illustrate the application of the techniques
                 discussed.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compromisability; data base systems; data security;
                 database inference; privacy protection; statistical
                 databases; statistical queries",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0): {\bf Security, integrity, and
                 protection**}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query processing};
                 Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}",
}

@Article{Lee:1980:QTF,
  author =       "D. T. Lee and C. K. Wong",
  title =        "Quintary Trees: a File Structure for Multidimensional
                 Database Systems",
  journal =      j-TODS,
  volume =       "5",
  number =       "3",
  pages =        "339--353",
  month =        sep,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 Graphics/siggraph/80.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-3/p339-lee/p339-lee.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-3/p339-lee/",
  abstract =     "A file structure is presented that was designed for a
                 database system in which four types of retrieval
                 requests (queries) are allowed: exact match, partial
                 match, range, and partial range queries. Outlines are
                 sketched for inserting and deleting records that
                  require $ O(k + (\log N)^k) $ time, on the average. This
                 structure achieves faster response time than previously
                 known structures (for many of the queries) at the cost
                 of extra storage.",
  acknowledgement = ack-nhfb,
  annote =       "Four types of retrieval (queries) are allowed: exact
                 match, partial match, range, and partial range queries.
                 Faster response time at the cost of extra storage.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; data processing --- data
                 structures; database system; exact match queries; file
                 maintenance; information retrieval; key;
                 multidimensional space; queries; range search; search",
  subject =      "Data --- Data Structures (E.1): {\bf Trees};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}; Information Systems
                 --- Information Storage and Retrieval --- Information
                 Storage (H.3.2): {\bf File organization}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3)",
}

@Article{Kung:1980:CMB,
  author =       "H. T. Kung and Philip L. Lehman",
  title =        "Concurrent Manipulation of Binary Search Trees",
  journal =      j-TODS,
  volume =       "5",
  number =       "3",
  pages =        "354--382",
  month =        sep,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-3/p354-kung/p354-kung.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-3/p354-kung/",
  abstract =     "The concurrent manipulation of a binary search tree is
                 considered in this paper. The systems presented can
                 support any number of concurrent processes which
                 perform searching, insertion, deletion, and rotation
                 (reorganization) on the tree, but allow any process to
                 lock only a constant number of nodes at any time. Also,
                 in the systems, searches are essentially never blocked.
                 The concurrency control techniques introduced in the
                 paper include the use of special nodes and pointers to
                 redirect searches, and the use of copies of sections of
                 the tree to introduce many changes simultaneously and
                 therefore avoid unpredictable interleaving. Methods
                 developed in this paper may provide new insights into
                 other problems in the area of concurrent database
                 manipulation.",
  acknowledgement = ack-nhfb,
  annote =       "Operations on trees are defined so that concurrency of
                 access is possible while the number of locked nodes is
                 minimal.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "binary search trees; concurrency controls; concurrent
                 algorithm; consistency; correctness; data processing;
                 data structures; databases; locking protocols",
  subject =      "Data --- Data Structures (E.1): {\bf Trees};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}",
}

@Article{Denning:1980:CLQ,
  author =       "D. E. Denning",
  title =        "Corrigenda on Linear Queries in Statistical
                 Databases",
  journal =      j-TODS,
  volume =       "5",
  number =       "3",
  pages =        "383--383",
  month =        sep,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibsource =    "Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  annote =       "refers to Schwartz 1979 TODS.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
}

@Article{Hsiao:1980:TFT,
  author =       "David K. Hsiao",
  title =        "{TODS} --- the first three years {(1976--1978)}",
  journal =      j-TODS,
  volume =       "5",
  number =       "4",
  pages =        "385--403",
  month =        dec,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-4/p385-hsiao/p385-hsiao.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-4/p385-hsiao/",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  subject =      "General Literature --- General (A.0)",
}

@Article{Armstrong:1980:DFD,
  author =       "W. W. Armstrong and C. Delobel",
  title =        "Decompositions and Functional Dependencies in
                 Relations",
  journal =      j-TODS,
  volume =       "5",
  number =       "4",
  pages =        "404--430",
  month =        dec,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-4/p404-armstrong/p404-armstrong.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-4/p404-armstrong/",
  abstract =     "A general study is made of two basic integrity
                 constraints on relations: functional and multivalued
                 dependencies. The latter are studied via an equivalent
                 concept: decompositions. A model is constructed for any
                 possible combination of functional dependencies and
                 decompositions. The model embodies some decompositions
                 as unions of relations having different schemata of
                 functional dependencies. This suggests a new, stronger
                 integrity constraint, the degenerate decomposition.
                 More generally, the theory demonstrates the importance
                 of using the union operation in database design and of
                 allowing different schemata on the operands of a union.
                 Techniques based on the union lead to a method for
                 solving the problem of membership of a decomposition in
                 the closure of a given set of functional dependencies
                 and decompositions. The concept of antiroot is
                 introduced as a tool for describing families of
                 decompositions, and its fundamental importance for
                 database design is indicated.",
  acknowledgement = ack-nhfb,
  annote =       "A general study is made of two basic integrity
                  constraints, functional and multivalued dependencies,
                 via an equivalent concept: decompositions.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; decomposition; functional
                 dependency; integrity constraint; multivalued
                 dependency; relational database",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases}",
}

@Article{Hammer:1980:RMS,
  author =       "Michael Hammer and David Shipman",
  title =        "Reliability Mechanisms for {SDD-1}: a System for
                 Distributed Databases",
  journal =      j-TODS,
  volume =       "5",
  number =       "4",
  pages =        "431--466",
  month =        dec,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Distributed/fault.tolerant.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-4/p431-hammer/p431-hammer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-4/p431-hammer/",
  abstract =     "This paper presents the reliability mechanisms of
                 SDD-1, a prototype distributed database system being
                 developed by the Computer Corporation of America.
                 Reliability algorithms in SDD-1 center around the
                 concept of the Reliable Network (RelNet). The RelNet is
                 a communications medium incorporating facilities for
                 site status monitoring, event timestamping, multiply
                 buffered message delivery, and the atomic control of
                 distributed transactions.\par

                 This paper is one of a series of companion papers on
                 SDD-1 [3, 4, 6, 13].",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Reliability",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "atomicity; data base systems; distributed databases;
                 recovery; reliability",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart}",
}

@Article{Schloer:1980:SSD,
  author =       "Jan Schl{\"o}er",
  title =        "Security of statistical databases: multidimensional
                 transformation",
  journal =      j-TODS,
  volume =       "5",
  number =       "4",
  pages =        "467--492",
  month =        dec,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/citations/journals/tods/1980-5-4/p467-schler/",
  abstract =     "Statistical evaluation of databases which contain
                 personal records may entail risks for the
                 confidentiality of the individual records. The risk has
                 increased with the availability of flexible interactive
                 evaluation programs which permit the use of trackers,
                 the most dangerous class of snooping tools known. A
                 class of trackers, called union trackers, is described.
                 They permit reconstruction of the entire database
                 without supplementary knowledge and include the general
                 tracker recently described as a special case. For many
                 real statistical databases the overwhelming majority of
                 definable sets of records will form trackers. For such
                 databases a random search for a tracker is likely to
                 succeed rapidly. Individual trackers are redefined and
                 counted and their cardinalities are investigated. If
                 there are $n$ records in the database, then most
                 individual trackers employ innocent cardinalities near
                 $ n / 3 $, making them difficult to detect. Disclosure
                 with trackers usually requires little effort per
                 retrieved data element.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "confidentiality; database security; security;
                 statistical database; tracker",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}; Information Systems --- Database Management
                 --- Database Administration (H.2.7): {\bf Security,
                 integrity, and protection}",
  xxtitle =      "Disclosure from Statistical Databases: Quantitative
                 Aspects of Trackers",
}

@Article{Herot:1980:SMD,
  author =       "Christopher F. Herot",
  title =        "Spatial Management of Data",
  journal =      j-TODS,
  volume =       "5",
  number =       "4",
  pages =        "493--513",
  month =        dec,
  year =         "1980",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; Distributed/gesturing.bib;
                 Graphics/imager/imager.80.bib;
                 Graphics/siggraph/80.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1980-5-4/p493-herot/p493-herot.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1980-5-4/p493-herot/",
  abstract =     "Spatial data management is a technique for organizing
                 and retrieving information by positioning it in a
                 graphical data space (GDS). This graphical data space
                 is viewed through a color raster-scan display which
                 enables users to traverse the GDS surface or zoom into
                 the image to obtain greater detail. In contrast to
                 conventional database management systems, in which
                 users access data by asking questions in a formal query
                 language, a spatial data management system (SDMS)
                 presents the information graphically in a form that
                 seems to encourage browsing and to require less prior
                 knowledge of the contents and organization of the
                 database.\par

                 This paper presents an overview of the SDMS concept and
                 describes its implementation in a prototype system for
                 retrieving information from both a symbolic database
                 management system and an optical videodisk.",
  acknowledgement = ack-nhfb,
  annote =       "Organizing and retrieving information by positioning
                 it in a graphical data space viewed through a color
                 display. An overview of the SDMS concept and describes
                 its implementation in a prototype system.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer graphics; database query languages;
                 graphical/programming language, query language,
                 Man-Machine Communications interaction, data base
                 systems; graphics languages; man-machine interaction",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Spatial databases
                 and GIS}; Information Systems --- Database Management
                 --- Languages (H.2.3): {\bf Query languages};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}",
}

@Article{Zaniolo:1981:DRD,
  author =       "Carlo Zaniolo and Michel A. Melkanoff",
  title =        "On the Design of Relational Database Schemata",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "1--47",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15 (68H05)",
  MRnumber =     "82b:68019",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p1-zaniolo/p1-zaniolo.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p1-zaniolo/",
  abstract =     "The purpose of this paper is to present a new approach
                 to the conceptual design of relational databases based
                 on the complete relatability conditions (CRCs).\par

                 It is shown that current database design methodology
                 based upon the elimination of anomalies is not
                 adequate. In contradistinction, the CRCs are shown to
                 provide a powerful criterion for decomposition. A
                 decomposition algorithm is presented which (1) permits
                 decomposition of complex relations into simple,
                 well-defined primitives, (2) preserves all the original
                 information, and (3) minimizes redundancy.\par

                 The paper gives a complete derivation of the CRCs,
                 beginning with a unified treatment of functional and
                 multivalued dependencies, and introduces the concept of
                 elementary functional dependencies and multiple
                 elementary multivalued dependencies. Admissibility of
                 covers and validation of results are also discussed,
                 and it is shown how these concepts may be used to
                 improve the design of 3NF schemata. Finally, a
                 convenient graphical representation is proposed, and
                 several examples are described in detail to illustrate
                 the method.",
  acknowledgement = ack-nhfb,
  annote =       "The conceptual design of relational databases based on
                 the complete relatability conditions (CRCs). A unified
                 treatment of functional and multivalued dependencies.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; decomposition; functional
                 dependencies; minimal covers; multivalued dependencies;
                 relational databases; schema design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Schema and subschema}",
}

@Article{Lien:1981:HSR,
  author =       "Y. Edmund Lien",
  title =        "Hierarchical Schemata for Relational Databases",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "48--69",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15 (68H05)",
  MRnumber =     "82b:68015",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p48-lien/p48-lien.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p48-lien/",
  abstract =     "Most database design methods for the relational model
                 produce a flat database, that is, a family of relations
                 with no explicit interrelational connections. The user
                 of a flat database is likely to be unaware of certain
                 interrelational semantics. In contrast, the
                 entity-relationship model provides schema graphs as a
                 description of the database, as well as for navigating
                 the database. Nevertheless, the user of an
                 entity-relationship database may still commit semantic
                 errors, such as performing a lossy join. This paper
                 proposes a nonflat, or hierarchical, view of relational
                 databases. Relations are grouped together to form {\em
                 relation hierarchies\/} in which lossless joins are
                 explicitly shown whereas lossy joins are excluded.
                 Relation hierarchies resemble the schema graphs in the
                 entity-relationship model.\par

                 An approach to the design of relation hierarchies is
                 outlined in the context of data dependencies and
                 relational decomposition. The approach consists of two
                 steps; each is described as an algorithm. Algorithm DEC
                 decomposes a given universal relation according to a
                 given set of data dependencies and produces a set of
                 nondecomposable relation schemes. This algorithm
                 differs from its predecessors in that it produces no
                 redundant relation schemes. Algorithm RH further
                 structures the relation schemes produced by Algorithm
                 DEC into a hierarchical schema. These algorithms can be
                 useful software tools for database designers.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database design; lossless join;
                 multivalued dependency; relation normalization",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Schema and subschema}",
}

@Article{Chamberlin:1981:SRT,
  author =       "D. D. Chamberlin and M. M. Astrahan and W. F. King and
                 R. A. Lorie and J. W. Mehl and T. G. Price and M.
                 Schkolnick and P. Griffiths Selinger and D. R. Slutz
                 and B. W. Wade and R. A. Yost",
  title =        "Support for Repetitive Transactions and Ad Hoc Queries
                 in {System R}",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "70--94",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: IBM Research Report
                 RJ2551(33151), May. 1979.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p70-chamberlin/p70-chamberlin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p70-chamberlin/",
  abstract =     "System R supports a high-level relational user
                 language called SQL which may be used by ad hoc users
                 at terminals or as an embedded data sublanguage in PL/I
                 or COBOL. Host-language programs with embedded SQL
                 statements are processed by the System R precompiler
                 which replaces the SQL statements by calls to a
                 machine-language access module. The precompilation
                 approach removes much of the work of parsing, name
                 binding, and access path selection from the path of a
                 running program, enabling highly efficient support for
                 repetitive transactions. Ad hoc queries are processed
                 by a similar approach of name binding and access path
                 selection which takes place on-line when the query is
                 specified. By providing a flexible spectrum of binding
                 times, System R permits transaction-oriented programs
                 and ad hoc query users to share a database without loss
                 of efficiency.\par

                 System R is an experimental database management system
                 designed and built by members of the IBM San Jose
                 Research Laboratory as part of a research program on
                 the relational model of data. This paper describes the
                 architecture of System R, and gives some preliminary
                 measurements of system performance in both the ad hoc
                 query and the ``canned program'' environments.",
  acknowledgement = ack-nhfb,
  annote =       "Embedded SQL statements are processed by the System R
                 precompiler enabling highly efficient support for
                 repetitive transactions. Ad hoc queries are bound
                 on-line when the query is specified. By providing a
                 flexible spectrum of binding times, System R permits
                 transaction-oriented programs and ad hoc
                 query users to share a database without loss of
                 efficiency.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compilation; data base systems, TODS ad-hoc relation
                 database IBM San Jose; performance measurements; query
                 languages; relational database systems; transaction
                 processing",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf System R}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Relational databases}",
}

@Article{Schlorer:1981:SSD,
  author =       "Jan Schl{\"o}rer",
  title =        "Security of Statistical Databases: Multidimensional
                 Transformation",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "95--112",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15",
  MRnumber =     "82b:68018",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p95-schlorer/p95-schlorer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p95-schlorer/",
  abstract =     "The concept of multidimensional transformation of
                 statistical databases is described. A given set of
                 statistical output may be compatible with more than one
                 statistical database. A transformed database $ D' $ is
                 a database which (1) differs from the original database
                 $D$ in its record content, but (2) produces, within
                 certain limits, the same statistical output as the
                 original database. For a transformable database $D$
                 there are two options: One may physically transform $D$
                 into a suitable database $ D' $, or one may release
                 only that output which will not permit the users to
                 decide whether it comes from $D$ or $ D' $. The second
                 way is, of course, the easier one. Basic structural
                 requirements for transformable statistical databases
                 are investigated. Advantages, drawbacks, and open
                 questions are discussed.",
  acknowledgement = ack-nhfb,
  annote =       "A transformed database differs from the original
                 database in its record content but produces within
                 certain limits the same statistical output.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "confidentiality; data base systems; data processing
                 --- security of data; database; database security;
                 matrices; security; statistical database",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}",
}

@Article{Chin:1981:SDD,
  author =       "Francis Y. Chin and Gultekin {\"O}zsoyo{\u{g}}lu",
  title =        "Statistical Database Design",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "113--139",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p113-chin/p113-chin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p113-chin/",
  abstract =     "The security problem of a statistical database is to
                 limit the use of the database so that no sequence of
                 statistical queries is sufficient to deduce
                 confidential or private information. In this paper it
                 is suggested that the problem be investigated at the
                 conceptual data model level. The design of a
                 statistical database should utilize a statistical
                 security management facility to enforce the security
                 constraints at the conceptual model level. Information
                 revealed to users is well defined in the sense that it
                 can at most be reduced to nondecomposable information
                 involving a group of individuals. In addition, the
                 design also takes into consideration means of storing
                 the query information for auditing purposes, changes in
                 the database, users' knowledge, and some security
                 measures.",
  acknowledgement = ack-nhfb,
  annote =       "Limit the use of the database so that no sequence of
                 statistical queries is sufficient to deduce
                 confidential information at the conceptual data model
                 level.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compromisability; conceptual databases model; data
                 base systems; data processing --- security of data;
                 database design; protection; security; statistical
                 database",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8): {\bf Statistical
                 databases}",
}

@Article{Shipman:1981:FDM,
  author =       "David W. Shipman",
  title =        "The Functional Data Model and the Data Language
                 {DAPLEX}",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "140--173",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/bibdb.bib;
                 Database/Graefe.bib; Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/Functional.bib; Misc/is.bib",
  note =         "Reprinted in \cite{Stonebraker:1988:RDS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p140-shipman/p140-shipman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p140-shipman/",
  abstract =     "DAPLEX is a database language which incorporates:
                 \par

                 a formulation of data in terms of entities;\par

                 a functional representation for both actual and virtual
                 data relationships;\par

                 a rich collection of language constructs for expressing
                 entity selection criteria;\par

                 a notion of subtype/supertype relationships among
                 entity types.\par

                 This paper presents and motivates the DAPLEX language
                 and the underlying data model on which it is based.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; data base systems;
                 database; functional data model; language",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf DAPLEX}; Information Systems
                 --- Database Management --- Languages (H.2.3)",
}

@Article{Rosenberg:1981:TSO,
  author =       "Arnold L. Rosenberg and Lawrence Snyder",
  title =        "Time- and Space-Optimality in {B-Trees}",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "174--193",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15 (68E10)",
  MRnumber =     "82m:68048",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p174-rosenberg/p174-rosenberg.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p174-rosenberg/",
  abstract =     "A B-tree is {\em compact\/} if it is minimal in number
                 of nodes, hence has optimal space utilization, among
                 equally capacious B-trees of the same order. The space
                 utilization of compact B-trees is analyzed and compared
                 with that of noncompact B-trees and with
                 (node)-visit-optimal B-trees, which minimize the
                 expected number of nodes visited per key access.
                 Compact B-trees can be as much as a {\em factor\/} of
                 2.5 more space efficient than visit-optimal B-trees;
                 and the node-visit cost of a compact tree is never more
                 than 1 + the node-visit cost of an optimal tree. The
                 utility of initializing a B-tree to be compact (which
                 initialization can be done in time linear in the number
                 of keys if the keys are presorted) is demonstrated by
                 comparing the space utilization of a compact tree that
                 has been augmented by random insertions with that of a
                 tree that has been grown entirely by random insertions.
                 Even after increasing the number of keys by a modest
                 amount, the effects of compact initialization are still
                 felt. Once the tree has grown so large that these
                 effects are no longer discernible, the tree can be
                 expeditiously compacted in place using an algorithm
                 presented here; and the benefits of compactness
                 resume.",
  acknowledgement = ack-nhfb,
  annote =       "A Btree is compact if it is minimal in number of
                 nodes. Compact Btree initialization can be done in time
                 linear in the number of keys if the keys are presorted.
                 Study indicates that space-optimal trees are nearly
                 time optimal, but time-optimal trees are nearly space
                 pessimal.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "2,3-tree; B-tree; bushy B-tree; compact B-tree; data
                 processing; node-visit cost; space utilization",
  subject =      "Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Trees}",
}

@Article{Scholl:1981:NFO,
  author =       "Michel Scholl",
  title =        "New File Organizations Based on Dynamic Hashing",
  journal =      j-TODS,
  volume =       "6",
  number =       "1",
  pages =        "194--211",
  month =        mar,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15",
  MRnumber =     "82c:68016",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-1/p194-scholl/p194-scholl.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-1/p194-scholl/",
  abstract =     "New file organizations based on hashing and suitable
                 for data whose volume may vary rapidly recently
                 appeared in the literature. In the three schemes which
                 have been independently proposed, rehashing is avoided,
                 storage space is dynamically adjusted to the number of
                 records actually stored, and there are no overflow
                 records. Two of these techniques employ an index to the
                 data file. Retrieval is fast and storage utilization is
                 low.\par

                 In order to increase storage utilization, we introduce
                 two schemes based on a similar idea and analyze the
                 performance of the second scheme. Both techniques use
                 an index of much smaller size. In both schemes,
                 overflow records are accepted. The price which has to
                 be paid for the improvement in storage utilization is a
                 slight access cost degradation.",
  acknowledgement = ack-nhfb,
  annote =       "In the three schemes which have been proposed, rehashing is
                 avoided, storage space is dynamically adjusted to the
                 number of records actually stored, and there are no
                 overflow records. Two of these techniques employ an
                 index to the data file.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; data structure; dynamic hashing; file
                 organization; hashing; linear splitting",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Kung:1981:OMC,
  author =       "H. T. Kung and John T. Robinson",
  title =        "On Optimistic Methods for Concurrency Control",
  journal =      j-TODS,
  volume =       "6",
  number =       "2",
  pages =        "213--226",
  month =        jun,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Compiler/garbage.collection.bib;
                 Compiler/Heaps.bib; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/misc.1.bib; Misc/real.time.bib;
                 Object/Nierstrasz.bib",
  note =         "Reprinted in \cite{Stonebraker:1988:RDS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-2/p213-kung/p213-kung.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-2/p213-kung/",
  abstract =     "Most current approaches to concurrency control in
                 database systems rely on locking of data objects as a
                 control mechanism. In this paper, two families of
                 nonlocking concurrency controls are presented. The
                 methods used are ``optimistic'' in the sense that they
                 rely mainly on transaction backup as a control
                 mechanism, ``hoping'' that conflicts between
                 transactions will not occur. Applications for which
                 these methods should be more efficient than locking are
                 discussed.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency controls; data base systems, concurrency
                 other; databases; transaction processing",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}",
}

@Article{Boral:1981:PAS,
  author =       "Haran Boral and David J. DeWitt",
  title =        "Processor Allocation Strategies for Multiprocessor
                 Database Machines",
  journal =      j-TODS,
  volume =       "6",
  number =       "2",
  pages =        "227--254",
  month =        jun,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-2/p227-boral/p227-boral.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-2/p227-boral/",
  abstract =     "In this paper four alternative strategies for
                 assigning processors to queries in multiprocessor
                 database machines are described and evaluated. The
                 results demonstrate that SIMD database machines are
                 indeed a poor design when their performance is compared
                 with that of the three MIMD strategies presented.
                 \par

                 Also introduced is the application of data-flow machine
                 techniques to the processing of relational algebra
                 queries. A strategy that employs data-flow techniques
                 is shown to be superior to the other strategies
                 described by several experiments. Furthermore, if the
                 data-flow query processing strategy is employed, the
                 results indicate that a two-level storage hierarchy (in
                 which relations are paged between a shared data cache
                 and mass storage) does not have a significant impact on
                 performance.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative processors; back-end computers; computer
                 architecture; data base systems, Direct TODS; data-flow
                 computers; database machines; database management;
                 parallel processors; processor scheduling",
  subject =      "Information Systems --- Database Management ---
                 Database Machines (H.2.6); Information Systems ---
                 Database Management (H.2)",
}

@Article{Su:1981:TDT,
  author =       "Stanley Y. W. Su and Herman Lam and Der Her Lo",
  title =        "Transformation of Data Traversals and Operations in
                 Application Programs to Account for Semantic Changes of
                 Databases",
  journal =      j-TODS,
  volume =       "6",
  number =       "2",
  pages =        "255--294",
  month =        jun,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-2/p255-su/p255-su.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-2/p255-su/",
  abstract =     "This paper addresses the problem of application
                 program conversion to account for changes in database
                 semantics that result in changes in the schema and
                 database contents. With the observation that the
                 existing data models can be viewed as alternative ways
                 of modeling the same database semantics, a methodology
                 of application program analysis and conversion based on
                 an existing-DBMS-model- and schema-independent
                 representation of both the database and programs is
                 presented. In this methodology, the source and target
                 databases are described in terms of the association
                 types of a semantic association model. The structural
                 properties, the integrity constraints, and the
                 operational characteristics (storage operation
                 behaviors) of the association types are more explicitly
                 defined to reveal the semantics that is generally
                 hidden in application programs. The explicit
                 descriptions of the source and target databases are
                 used as the basis for program analysis and conversion.
                 Application programs are described in terms of a small
                 number of ``access patterns'' which define the data
                 traversals and operations of the programs. In addition
                 to the methodology, this paper (1) describes a model of
                 a generalized application program conversion system
                 that serves as a framework for research, (2) presents
                 an analysis of access patterns that serve as the
                 primitives for program description, (3) delineates some
                 meaningful semantic changes to databases and their
                 corresponding transformation rules for program
                 conversion, (4) illustrates the application of these
                 rules to two different approaches to program conversion
                 problems, and (5) reports on the development effort
                 undertaken at the University of Florida.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access pattern; application program conversion; data
                 base systems; database changes; semantic data model;
                 transformation rules",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8); Information Systems ---
                 Database Management --- Physical Design (H.2.2): {\bf
                 Access methods}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}",
}

@Article{Clemons:1981:DES,
  author =       "Eric K. Clemons",
  title =        "Design of an External Schema Facility to Define and
                 Process Recursive Structures",
  journal =      j-TODS,
  volume =       "6",
  number =       "2",
  pages =        "295--311",
  month =        jun,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-2/p295-clemons/p295-clemons.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-2/p295-clemons/",
  abstract =     "The role of the external schema is to support user
                 views of data and thus to provide programmers with
                 easier data access. This author believes that an
                 external schema facility is best based on hierarchies,
                 both simple and recursive. After a brief introduction
                 to an external schema facility to support simple
                 hierarchical user views, the requirements for a
                 facility for recursive hierarchies are listed and the
                 necessary extensions to the external schema definition
                 language are offered.\par

                 Functions that must be provided for generality in
                 definition are node specification and node control.
                 Tree traversal functions must be provided for
                 processing. Definitions of each and examples of use are
                 presented.",
  acknowledgement = ack-nhfb,
  annote =       "[Ahad,Yao,Choi87] A.2.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "ANSI SPARC architectures; data base systems; external
                 schemata; recursive data structures; user views",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Schema and subschema}",
}

@Article{Davida:1981:DES,
  author =       "George I. Davida and David L. Wells and John B. Kam",
  title =        "A Database Encryption System with Subkeys",
  journal =      j-TODS,
  volume =       "6",
  number =       "2",
  pages =        "312--328",
  month =        jun,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15",
  MRnumber =     "82f:68020",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-2/p312-davida/p312-davida.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-2/p312-davida/",
  abstract =     "A new cryptosystem that is suitable for database
                 encryption is presented. The system has the important
                 property of having subkeys that allow the encryption
                 and decryption of fields within a record. The system is
                 based on the Chinese Remainder Theorem.",
  acknowledgement = ack-nhfb,
  annote =       "Subkeys allow the encryption and decryption of fields
                 within a record. The system is based on the Chinese
                 Remainder Theorem.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "codes, symbolic; data base systems; data security;
                 databases; decryption; encryption; subkeys",
  subject =      "Data --- Data Encryption (E.3)",
}

@Article{Ling:1981:ITN,
  author =       "Tok Wang Ling and Frank W. Tompa and Tiko Kameda",
  title =        "An Improved Third Normal Form for Relational
                 Databases",
  journal =      j-TODS,
  volume =       "6",
  number =       "2",
  pages =        "329--346",
  month =        jun,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15",
  MRnumber =     "82f:68024",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-2/p329-ling/p329-ling.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-2/p329-ling/",
  abstract =     "In this paper, we show that some Codd third normal
                 form relations may contain ``superfluous'' attributes
                 because the definitions of transitive dependency and
                 prime attribute are inadequate when applied to sets of
                 relations. To correct this, an improved third normal
                 form is defined and an algorithm is given to construct
                 a set of relations from a given set of functional
                 dependencies in such a way that the superfluous
                 attributes are guaranteed to be removed. This new
                 normal form is compared with other existing definitions
                 of third normal form, and the deletion normalization
                 method proposed is shown to subsume the decomposition
                 method of normalization.",
  acknowledgement = ack-nhfb,
  annote =       "An improved third normal form is defined and an
                 algorithm is given to construct a set of relations from
                 a given set of functional dependencies in such a way
                 that the superfluous attributes are guaranteed to be
                 removed.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "covering; data base systems; database design;
                 functional dependency; normalization; prime attribute;
                 reconstructibility; relational schema; third normal
                 form; transitive dependency",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Schema and subschema}",
}

@Article{McLean:1981:CSC,
  author =       "Gordon {McLean, Jr.}",
  title =        "Comments on {SDD-1} Concurrency Control Mechanisms",
  journal =      j-TODS,
  volume =       "6",
  number =       "2",
  pages =        "347--350",
  month =        jun,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-2/p347-mclean/p347-mclean.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-2/p347-mclean/",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}",
}

@Article{Hammer:1981:DDS,
  author =       "Michael Hammer and Dennis McLeod",
  title =        "Database Description with {SDM}: a Semantic Database
                 Model",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "351--386",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/bibdb.bib;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  note =         "Reprinted in \cite{Stonebraker:1988:RDS}. Also
                 published in \cite{Zdonik:1990:ROO}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-3/p351-hammer/p351-hammer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-3/p351-hammer/",
  abstract =     "SDM is a high-level semantics-based database
                 description and structuring formalism (database model)
                 for databases. This database model is designed to
                 capture more of the meaning of an application
                 environment than is possible with contemporary database
                 models. An SDM specification describes a database in
                 terms of the kinds of entities that exist in the
                 application environment, the classifications and
                 groupings of those entities, and the structural
                 interconnections among them. SDM provides a collection
                 of high-level modeling primitives to capture the
                 semantics of an application environment. By
                 accommodating derived information in a database
                 structural specification, SDM allows the same
                 information to be viewed in several ways; this makes it
                 possible to directly accommodate the variety of needs
                 and processing requirements typically present in
                 database applications. The design of the present SDM is
                 based on our experience in using a preliminary version
                 of it.\par

                 SDM is designed to enhance the effectiveness and
                 usability of database systems. An SDM database
                 description can serve as a formal specification and
                 documentation tool for a database; it can provide a
                 basis for supporting a variety of powerful user
                 interface facilities, it can serve as a conceptual
                 database model in the database design process; and, it
                 can be used as the database model for a new kind of
                 database management system.",
  acknowledgement = ack-nhfb,
  annote =       "SDM is a high-level semantics-based database model, to
                 capture the meaning of an application environment. One
                 of the papers usually referred to when discussing
                 semantic data models. Describes a model which permits a
                 lot of flexibility and expressiveness, and is
                 consequently difficult to implement. Advantage is that
                 it can be used as a specification and documentation
                 tool. Good introduction, giving an overview of (some?,
                 most?, all?) problems in semantic data models. The
                 section describing SDM DDL is a bit too detailed (one
                 needs to pick up the essential concepts like
                 subclassing, and redundancy in model (which may be
                 necessary to make the model easier to use)). Some
                 discussion of inheritance is also present. Nothing much
                 is said in the final discussion. Reasonable paper. To
                 benefit, one needs to be careful not to get lost in the
                 details. A detailed description of the semantic data
                 model.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database definition; database
                 management; database modeling; database models;
                 database semantics; logical database design",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}",
}

@Article{Fagin:1981:NFR,
  author =       "Ronald Fagin",
  title =        "A Normal Form for Relational Databases That is Based
                 on Domains and Keys",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "387--415",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Compiler/prog.lang.theory.bib;
                 Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-3/p387-fagin/p387-fagin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-3/p387-fagin/",
  abstract =     "The new normal form for relational databases, called
                 domain-key normal form (DK\slash NF), is defined. Also,
                 formal definitions of insertion anomaly and deletion
                 anomaly are presented. It is shown that a schema is in
                 DK\slash NF if and only if it has no insertion or
                 deletion anomalies. Unlike previously defined normal
                 forms, DK\slash NF is not defined in terms of
                 traditional dependencies (functional, multivalued, or
                 join). Instead, it is defined in terms of the more
                 primitive concepts of domain and key, along with the
                 general concept of a ``constraint''. It is considered
                 how the definitions of traditional normal forms might
                 be modified by taking into consideration, for the first
                 time, the combinatorial consequences of bounded domain
                 sizes. It is shown that after this modification, these
                 traditional normal forms are all implied by DK\slash
                 NF. In particular, if all domains are infinite, then
                 these traditional normal forms are all implied by
                 DK\slash NF.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "anomaly; complexity; data base systems; database
                 design; DK/NF; domain-key normal form; functional
                 dependency; join dependency; multivalued dependency;
                 normalization; relational database",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Normal forms}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Relational databases}",
}

@Article{Hong:1981:AHS,
  author =       "Y. C. Hong and Stanley Y. W. Su",
  title =        "Associative Hardware and Software Techniques for
                 Integrity Control",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "416--440",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-3/p416-hong/p416-hong.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-3/p416-hong/",
  abstract =     "This paper presents the integrity control mechanism of
                 the associative processing system, CASSM. The mechanism
                 takes advantage of the associative techniques, such as
                 content and context addressing, tagging and marking
                 data, parallel processing, automatic triggering of
                 integrity control procedures, etc., for integrity
                 control and as a result offers three significant
                 advantages: (1) The problem of staging data in a main
                 memory for integrity checking can be eliminated because
                 database storage operations are verified at the place
                 where the data are stored. (2) The backout or merging
                 procedures are relatively easy and inexpensive in the
                 associative system because modified copies can be
                 substituted for the originals or may be discarded by
                 merely changing their associated tags. (3) The database
                 management system software is simplified because
                 database integrity functions are handled by the
                 associative processing system to which a mainframe
                 computer is a front-end computer.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "assertion and trigger; associative techniques;
                 cellular-logic devices; data base systems; database
                 integrity; database management; integrity control;
                 integrity control, SYWSu hardware support relational
                 database machine TODS",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0): {\bf Security, integrity, and
                 protection**}",
}

@Article{March:1981:FMS,
  author =       "Salvatore T. March and Dennis G. Severance and Michael
                 Wilens",
  title =        "Frame Memory: a Storage Architecture to Support Rapid
                 Design and Implementation of Efficient Databases",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "441--463",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-3/p441-march/p441-march.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-3/p441-march/",
  abstract =     "Frame memory is a virtual view of secondary storage
                 that can be implemented with reasonable overhead to
                 support database record storage and accessing
                 requirements. Frame memory is designed so that its
                 operating characteristics can be easily manipulated by
                 either designers or design algorithms, while
                 performance effects of such changes can be accurately
                 predicted. Automated design procedures exist to
                 generate and evaluate alternative database designs
                 built upon frame memory, and the existence of these
                 procedures establishes frames as an attractive memory
                 management architecture for future database management
                 systems.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "analytic modeling; data base systems; database design
                 system; database machine; hardware support; TODS;
                 virtual secondary storage",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2); Software ---
                 Operating Systems --- Storage Management (D.4.2): {\bf
                 Secondary storage}",
}

@Article{vandeRiet:1981:HLP,
  author =       "Reind P. {van de Riet} and Anthony I. Wasserman and
                 Martin L. Kersten and Wiebren {de Jonge}",
  title =        "High-Level Programming Features for Improving the
                 Efficiency of a Relational Database System",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "464--485",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: UCSF, Lab. of Med. Inf. Science,
                 Tech. Rpt. 44, Feb. 1980.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-3/p464-van_de_riet/p464-van_de_riet.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-3/p464-van_de_riet/",
  abstract =     "This paper discusses some high-level language
                 programming constructs that can be used to manipulate
                 the relations of a relational database system
                 efficiently. Three different constructs are described:
                 (1) tuple identifiers that directly reference tuples of
                 a relation; (2) cursors that may iterate over the
                 tuples of a relation; and (3) markings, a form of
                 temporary relation consisting of a set of tuple
                 identifiers. In each case, attention is given to
                 syntactic, semantic, and implementation considerations.
                 \par

                 The use of these features is first presented within the
                 context of the programming language PLAIN, and it is
                 then shown how these features could be used more
                 generally to provide database manipulation capabilities
                 in a high-level programming language. Consideration is
                 also given to issues of programming methodology, with
                 an important goal being the achievement of a balance
                 between the enforcement of good programming practices
                 and the ability to write efficient programs.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "markings; PLAIN; programming languages; programming
                 methodology; relational algebra; relational database
                 management",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Database (persistent)
                 programming languages}",
}

@Article{Culik:1981:DMT,
  author =       "K. {Culik II} and Th. Ottmann and D. Wood",
  title =        "Dense multiway trees",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "486--512",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15 (05C05)",
  MRnumber =     "82m:68038",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/citations/journals/tods/1981-6-3/p486-culic/",
  abstract =     "B-trees of order $m$ are a ``balanced'' class of
                 $m$-ary trees, which have applications in the areas of
                 file organization. In fact, they have been the only
                 choice when balanced multiway trees are required.
                 Although they have very simple insertion and deletion
                 algorithms, their storage utilization, that is, the
                 number of keys per page or node, is at worst 50
                 percent. In the present paper we investigate a new
                 class of balanced $m$-ary trees, the dense multiway
                 trees, and compare their storage utilization with that
                 of B-trees of order $m$. \par

                 Surprisingly, we are able to demonstrate that weakly
                 dense multiway trees have an $O(\log_2 N)$ insertion
                 algorithm. We also show that inserting $m^h - 1$ keys
                 in ascending order into an initially empty dense
                 multiway tree yields the complete $m$-ary tree of
                 height $h$, and that at intermediate steps in the
                 insertion sequence the intermediate trees can also be
                 considered to be as dense as possible. Furthermore, an
                 analysis of the limiting dynamic behavior of the dense
                 $m$-ary trees under insertion shows that the average
                 storage utilization tends to 1; that is, the trees
                 become as dense as possible. This motivates the use of
                 the term ``dense.''",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "B-trees; balanced trees; dense trees; multiway trees;
                 search trees; storage utilization",
  subject =      "Data --- Data Structures (E.1): {\bf Trees}",
}

@Article{Comer:1981:AHF,
  author =       "Douglas Comer",
  title =        "Analysis of a Heuristic for Full Trie Minimization",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "513--537",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-3/p513-comer/p513-comer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-3/p513-comer/",
  abstract =     "A trie is a distributed-key search tree in which
                 records from a file correspond to leaves in the tree.
                 Retrieval consists of following a path from one root to
                 a leaf, where the choice of edge at each node is
                 determined by attribute values of the key. For full
                 tries, those in which all leaves lie at the same depth,
                 the problem of finding an ordering of attributes which
                 yields a minimum size trie is NP-complete.\par

                 This paper considers a ``greedy'' heuristic for
                 constructing low-cost tries. It presents simulation
                 experiments which show that the greedy method tends to
                 produce tries with small size, and analysis leading to
                 a worst case bound on approximations produced by the
                 heuristic. It also shows a class of files for which the
                 greedy method may perform badly, producing tries of
                 high cost.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; heuristic; trie index; trie size",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Problem Solving, Control Methods, and Search
                 (I.2.8): {\bf Heuristic methods}",
}

@Article{Kent:1981:CAU,
  author =       "W. Kent",
  title =        "Consequences of Assuming a Universal Relation",
  journal =      j-TODS,
  volume =       "6",
  number =       "4",
  pages =        "539--556",
  month =        dec,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/database.bib;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See remark \cite{Ullman:1983:KCA}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-4/p539-kent/p539-kent.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-4/p539-kent/",
  abstract =     "Although central to the current direction of
                 dependency theory, the assumption of a universal
                 relation is incompatible with some aspects of
                 relational database theory and practice. Furthermore,
                 the universal relation is itself ill defined in some
                 important ways. And, under the universal relation
                 assumption, the decomposition approach to database
                 design becomes virtually indistinguishable from the
                 synthetic approach.",
  acknowledgement = ack-nhfb,
  annote =       "The assumption of a universal relation is incompatible
                 with some aspects of relational database theory and
                 practice. Under the universal relation assumption, the
                 decomposition approach to database design becomes
                 virtually indistinguishable from the synthetic
                 approach.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database design; dependency theory;
                 relational database; relational theory; universal
                 relation",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Models and Principles ---
                 Systems and Information Theory (H.1.1); Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1)",
}

@Article{Bancilhon:1981:USR,
  author =       "F. B. Bancilhon and N. Spyratos",
  title =        "Update Semantics of Relational Views",
  journal =      j-TODS,
  volume =       "6",
  number =       "4",
  pages =        "557--575",
  month =        dec,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/nonmono.bib; Compendex database;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See comment \cite{Keller:1987:CBS}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-4/p557-bancilhon/p557-bancilhon.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-4/p557-bancilhon/",
  abstract =     "A database view is a portion of the data structured in
                 a way suitable to a specific application. Updates on
                 views must be translated into updates on the underlying
                 database. This paper studies the translation process in
                 the relational model.\par

                 The procedure is as follows: first, a ``complete'' set
                 of updates is defined such that\par

                 together with every update the set contains a
                 ``return'' update, that is, one that brings the view
                 back to the original state;\par

                 given two updates in the set, their composition is also
                 in the set.\par

                 To translate a complete set, we define a mapping called
                 a ``translator,'' that associates with each view update
                 a unique database update called a ``translation.'' The
                 constraint on a translation is to take the database to
                 a state mapping onto the updated view. The constraint
                 on the translator is to be a morphism.\par

                 We propose a method for defining translators. Together
                 with the user-defined view, we define a
                 ``complementary'' view such that the database could be
                 computed from the view and its complement. We show that
                 a view can have many different complements and that the
                 choice of a complement determines an update policy.
                 Thus, we fix a view complement and we define the
                 translation of a given view update in such a way that
                 the complement remains invariant (``translation under
                 constant complement''). The main result of the paper
                 states that, given a complete set $U$ of view updates,
                 $U$ has a translator if and only if $U$ is translatable
                 under constant complement.",
  acknowledgement = ack-nhfb,
  annote =       "A mapping called a ``translator'', associates with
                 each view update a unique database update. A method for
                 defining translators with the user-defined view, define
                 a ``complementary'' view such that the database could
                 be computed from the view and its complement. We define
                 the translation of a given view update in such a way
                  that the complement remains invariant. Applies to
                 Universal relations.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "conceptual model; data base systems; data model; data
                 semantics; database view; relation; relational model
                 database; update translation; view updating",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2)",
}

@Article{Baroody:1981:OOA,
  author =       "A. James {Baroody, Jr.} and David J. DeWitt",
  title =        "An Object-Oriented Approach to Database System
                 Implementation",
  journal =      j-TODS,
  volume =       "6",
  number =       "4",
  pages =        "576--601",
  month =        dec,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Object/Nierstrasz.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-4/p576-baroody/p576-baroody.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-4/p576-baroody/",
  abstract =     "This paper examines object-oriented programming as an
                 implementation technique for database systems. The
                 object-oriented approach encapsulates the
                 representations of database entities and relationships
                 with the procedures that manipulate them. To achieve
                 this, we first define abstractions of the modeling
                 constructs of the data model that describe their common
                 properties and behavior. Then we represent the entity
                 types and relationship types in the conceptual schema
                 and the internal schema by objects that are instances
                 of these abstractions. The generic procedures (data
                 manipulation routines) that comprise the user interface
                 can now be implemented as calls to the procedures
                 associated with these objects.\par

                 A generic procedure model of database implementation
                 techniques is presented and discussed. Several current
                 database system implementation techniques are
                 illustrated as examples of this model, followed by a
                 critical analysis of our implementation technique based
                 on the use of objects. We demonstrate that the
                 object-oriented approach has advantages of data
                 independence, run-time efficiency due to eliminating
                 access to system descriptors, and support for low-level
                 views.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming, olit-db casais; data base
                 systems; data independence; data manipulation routines;
                 database systems; high-level languages; object-oriented
                 programming; procedural binding",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Object-oriented databases};
                 Computer Systems Organization --- Computer System
                 Implementation (C.5); Information Systems --- Database
                 Management --- Languages (H.2.3)",
}

@Article{Bernstein:1981:QPS,
  author =       "Philip A. Bernstein and Nathan Goodman and Eugene Wong
                 and Christopher L. Reeve and James B. {Rothnie, Jr.}",
  title =        "Query Processing in a System for Distributed Databases
                 ({SDD-1})",
  journal =      j-TODS,
  volume =       "6",
  number =       "4",
  pages =        "602--625",
  month =        dec,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-4/p602-bernstein/p602-bernstein.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-4/p602-bernstein/",
  abstract =     "This paper describes the techniques used to optimize
                 relational queries in the SDD-1 distributed database
                 system. Queries are submitted to SDD-1 in a high-level
                 procedural language called Datalanguage. Optimization
                 begins by translating each Datalanguage query into a
                 relational calculus form called an {\em envelope},
                 which is essentially an aggregate-free QUEL query. This
                 paper is primarily concerned with the optimization of
                 envelopes.\par

                 Envelopes are processed in two phases. The first phase
                 executes relational operations at various sites of the
                 distributed database in order to delimit a subset of
                 the database that contains all data relevant to the
                 envelope. This subset is called a {\em reduction\/} of
                 the database. The second phase transmits the reduction
                 to one designated site, and the query is executed
                 locally at that site.\par

                 The critical optimization problem is to perform the
                 reduction phase efficiently. Success depends on
                 designing a good repertoire of operators to use during
                 this phase, and an effective algorithm for deciding
                 which of these operators to use in processing a given
                 envelope against a given database. The principal
                 reduction operator that we employ is called a {\em
                 semijoin}. In this paper we define the semijoin
                 operator, explain why semijoin is an effective
                 reduction operator, and present an algorithm that
                 constructs a cost-effective program of semijoins, given
                 an envelope and a database.",
  acknowledgement = ack-nhfb,
  annote =       "Techniques to optimize relational queries in the SDD-1
                 distributed database system. First phase executes
                 relational operations at various sites to delimit a
                 subset called a reduction. The second phase transmits
                 the reduction to one designated site. The principal
                 reduction operator, introduced here, is called a
                 semijoin.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- subroutines; data base
                 systems; distributed databases; query optimization;
                 query processing; query processing, TODS semijoins
                 semi-join join; relational databases; semijoins",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Distributed databases}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Relational databases}",
}

@Article{Welty:1981:HFC,
  author =       "Charles Welty and David W. Stemple",
  title =        "Human Factors Comparison of a Procedural and a
                 Nonprocedural Query Language",
  journal =      j-TODS,
  volume =       "6",
  number =       "4",
  pages =        "626--649",
  month =        dec,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-4/p626-welty/p626-welty.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-4/p626-welty/",
  abstract =     "Two experiments testing the ability of subjects to
                 write queries in two different query languages were
                 run. The two languages, SQL and TABLET, differ
                 primarily in their procedurality; both languages use
                 the relational data model, and their Halstead levels
                 are similar. Constructs in the languages which do not
                 affect their procedurality are identical. The two
                 languages were learned by the experimental subjects
                 almost exclusively from manuals presenting the same
                 examples and problems ordered identically for both
                 languages. The results of the experiments show that
                 subjects using the more procedural language wrote
                 difficult queries better than subjects using the less
                 procedural language. The results of the experiments are
                 also used to compare corresponding constructs in the
                 two languages and to recommend improvements for these
                 constructs.",
  acknowledgement = ack-nhfb,
  annote =       "SQL and TABLET. The results show that subjects using
                 the more procedural language wrote difficult queries
                 better than subjects using the less procedural
                 language.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; database systems; human factors;
                 procedural and nonprocedural languages; query
                 languages",
  subject =      "Information Systems --- Models and Principles ---
                 User/Machine Systems (H.1.2): {\bf Human factors};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Information
                 Systems --- Database Management --- Systems (H.2.4)",
}

@Article{Lehman:1981:ELC,
  author =       "Philip L. Lehman and S. Bing Yao",
  title =        "Efficient Locking for Concurrent Operations on
                 {B-Trees}",
  journal =      j-TODS,
  volume =       "6",
  number =       "4",
  pages =        "650--670",
  month =        dec,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-4/p650-lehman/p650-lehman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-4/p650-lehman/",
  abstract =     "The B-tree and its variants have been found to be
                 highly useful (both theoretically and in practice) for
                 storing large amounts of information, especially on
                 secondary storage devices. We examine the problem of
                 overcoming the inherent difficulty of concurrent
                 operations on such structures, using a practical
                 storage model. A single additional ``link'' pointer in
                 each node allows a process to easily recover from tree
                 modifications performed by other concurrent processes.
                 Our solution compares favorably with earlier solutions
                 in that the locking scheme is simpler (no read-locks
                 are used) and only a (small) constant number of nodes
                 are locked by any update process at any given time. An
                 informal correctness proof for our system is given.",
  acknowledgement = ack-nhfb,
  annote =       "A single additional `link' pointer in each node allows
                 a process to easily recover from tree modifications
                 performed by other concurrent processes. No read-locks
                  are used; only a (small) constant number of nodes are
                 locked by any update process at any given time.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "B-tree; concurrent algorithms; concurrency controls;
                 consistency; correctness; data processing; data
                 structures; database; index organizations; locking
                 protocols; multiway search trees",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Mathematics of
                 Computing --- Discrete Mathematics --- Graph Theory
                 (G.2.2): {\bf Trees}",
}

@Article{Larson:1981:AIS,
  author =       "Per-{\AA}ke Larson",
  title =        "Analysis of Index-Sequential Files with Overflow
                 Chaining",
  journal =      j-TODS,
  volume =       "6",
  number =       "4",
  pages =        "671--680",
  month =        dec,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68B15 (68H05)",
  MRnumber =     "82m:68044",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1981-6-4/p671-larson/p671-larson.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1981-6-4/p671-larson/",
  abstract =     "The gradual performance deterioration caused by
                 deletions from and insertions into an index-sequential
                 file after loading is analyzed. The model developed
                 assumes that overflow records are handled by chaining.
                 Formulas for computing the expected number of overflow
                 records and the expected number of additional accesses
                 caused by the overflow records for both successful and
                 unsuccessful searches are derived.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "analysis of algorithms; analytic model; data
                 processing, TODS ISAM; file organization; file
                 structure; index sequential files; indexed sequential
                 access method; ISAM; overflow; overflow chaining;
                 overflow handling; performance analysis",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Information Systems --- Database
                 Management --- Physical Design (H.2.2): {\bf Access
                 methods}",
}

@Article{Comer:1981:EKD,
  author =       "D. Comer",
  title =        "Extended {K-d} Tree Database Organization: a Dynamic
                 Multiattribute File Corresponds to Leaves in the Tree",
  journal =      j-TODS,
  volume =       "6",
  number =       "3",
  pages =        "??--??",
  month =        sep,
  year =         "1981",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 12:49:00 1996",
  bibsource =    "Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  annote =       "This paper considers a `greedy' heuristic for
                 constructing low-cost trees.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  xxnote =       "This paper does not seem to be published in TODS.",
}

@Article{Zaniolo:1982:DRN,
  author =       "C. Zaniolo",
  title =        "Database Relations with Null Values",
  journal =      j-TODS,
  volume =       "1",
  number =       "1",
  pages =        "??--??",
  month =        mar,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 12:48:57 1996",
  bibsource =    "Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  annote =       "a three-valued logic: TRUE, FALSE, UNKNOWN",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  xxnote =       "This paper does not seem to be published in TODS.",
}

@Article{Katz:1982:DCD,
  author =       "R. H. Katz and E. Wong",
  title =        "Decompiling {CODASYL DML} into Relational Queries",
  journal =      j-TODS,
  volume =       "7",
  number =       "1",
  pages =        "1--23",
  month =        mar,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-1/p1-katz/p1-katz.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-1/p1-katz/",
  abstract =     "A ``decompilation'' algorithm is developed to
                 transform a program written with the procedural
                 operations of CODASYL DML into one which interacts with
                 a relational system via a nonprocedural query
                 specification. An Access Path Model is introduced to
                 interpret the semantic accesses performed by the
                 program. Data flow analysis is used to determine how
                 FIND operations implement semantic accesses. A sequence
                 of these is mapped into a relational query and embedded
                 into the original program. The class of programs for
                 which the algorithm succeeds is characterized.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems; decompilation; semantic data
                 models",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Relational databases};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Data manipulation languages
                 (DML)}; Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Information
                 Systems --- Database Management --- Heterogeneous
                 Databases (H.2.5): {\bf Program translation**}",
}

@Article{Zaniolo:1982:FAD,
  author =       "Carlo Zaniolo and Michel A. Melkanoff",
  title =        "A Formal Approach to the Definition and the Design of
                 Conceptual Schemata for Database Systems",
  journal =      j-TODS,
  volume =       "7",
  number =       "1",
  pages =        "24--59",
  month =        mar,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-1/p24-zaniolo/p24-zaniolo.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-1/p24-zaniolo/",
  abstract =     "A formal approach is proposed to the definition and
                 the design of conceptual database diagrams to be used
                 as conceptual schemata in a system featuring a
                 multilevel schema architecture, and as an aid for the
                 design of other forms of schemata. We consider E-R
                 (entity-relationship) diagrams, and we introduce a new
                 representation called {\em CAZ\/}-graphs. A rigorous
                 connection is established between these diagrams and
                 some formal constraints used to describe relationships
                 in the framework of the relational data model. These
                 include functional and multivalued dependencies of
                 database relations. The basis for our schemata is a
                 combined representation for two fundamental structures
                 underlying every relation: the first defined by its
                 minimal atomic decompositions, the second by its
                 elementary functional dependencies.\par

                 The interaction between these two structures is
                 explored, and we show that, jointly, they can represent
                 a wide spectrum of database relationships, of which the
                 well-known one-to-one, one-to-many, and many-to-many
                 associations constitute only a small subset. It is
                 suggested that a main objective in conceptual schema
                 design is to ensure a complete representation of these
                 two structures. A procedure is presented to design
                 schemata which obtain this objective while eliminating
                 redundancy. A simple correspondence between the
                 topological properties of these schemata and the
                 structure of multivalued dependencies of the original
                 relation is established. Various applications are
                 discussed and a number of illustrative examples are
                 given.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems, logical design TODS",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}; Information Systems --- Database Management
                 --- Logical Design (H.2.1): {\bf Schema and
                 subschema}",
}

@Article{Batory:1982:OFD,
  author =       "D. S. Batory",
  title =        "Optimal File Designs and Reorganization Points",
  journal =      j-TODS,
  volume =       "7",
  number =       "1",
  pages =        "60--81",
  month =        mar,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: University of Toronto,
                 TR-CSRG-110, 1980.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-1/p60-batory/p60-batory.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-1/p60-batory/",
  abstract =     "A model for studying the combined problems of file
                 design and file reorganization is presented. New
                 modeling techniques for predicting the performance
                 evolution of files and for finding optimal
                 reorganization points for files are introduced.
                 Applications of the model to hash-based and
                 indexed-sequential files reveal important relationships
                 between initial loading factors and reorganization
                 frequency. A practical file design strategy, based on
                 these relationships, is proposed.",
  acknowledgement = ack-nhfb,
  annote =       "Applications of the model to hash-based and
                 indexed-sequential files reveal important relationships
                 between initial loading factors and reorganization
                 frequency.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; file design; file reorganization",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Physical Design (H.2.2)",
}

@Article{Du:1982:DAC,
  author =       "H. C. Du and J. S. Sobolewski",
  title =        "Disk Allocation for {Cartesian} Product Files on
                 Multiple-Disk Systems",
  journal =      j-TODS,
  volume =       "7",
  number =       "1",
  pages =        "82--101",
  month =        mar,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-1/p82-du/p82-du.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-1/p82-du/",
  abstract =     "Cartesian product files have recently been shown to
                 exhibit attractive properties for partial match
                 queries. This paper considers the file allocation
                 problem for Cartesian product files, which can be
                 stated as follows: Given a $k$-attribute Cartesian
                 product file and an $m$-disk system, allocate buckets
                 among the $m$ disks in such a way that, for all
                 possible partial match queries, the concurrency of disk
                 accesses is maximized. The Disk Modulo (DM) allocation
                 method is described first, and it is shown to be strict
                 optimal under many conditions commonly occurring in
                 practice, including all possible partial match queries
                 when the number of disks is 2 or 3. It is also shown
                 that although it has good performance, the DM
                 allocation method is not strict optimal for all
                 possible partial match queries when the number of disks
                 is greater than 3. The General Disk Modulo (GDM)
                 allocation method is then described, and a sufficient
                 but not necessary condition for strict optimality of
                 the GDM method for all partial match queries and any
                 number of disks is then derived. Simulation studies
                 comparing the DM and random allocation methods in terms
                 of the average number of disk accesses, in response to
                 various classes of partial match queries, show the
                 former to be significantly more effective even when the
                 number of disks is greater than 3, that is, even in
                 cases where the DM method is not strict optimal. The
                 results that have been derived formally and shown by
                 simulation can be used for more effective design of
                 optimal file systems for partial match queries. When
                 considering multiple-disk systems with independent
                 access paths, it is important to ensure that similar
                 records are clustered into the same or similar buckets,
                 while similar buckets should be dispersed uniformly
                 among the disks.",
  acknowledgement = ack-nhfb,
  annote =       "For partial match queries. Allocate buckets among the
                 m disks in such a way that, for all possible partial
                 match queries, the concurrency of disk accesses is
                 maximized.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "Cartesian product files; data processing",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2); Information Systems ---
                 Information Storage and Retrieval --- Information
                 Storage (H.3.2): {\bf File organization}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Search
                 process}; Information Systems --- Database Management
                 --- Systems (H.2.4)",
}

@Article{Dahl:1982:DSD,
  author =       "Ver{\'o}nica Dahl",
  title =        "On Database Systems Development through Logic",
  journal =      j-TODS,
  volume =       "7",
  number =       "1",
  pages =        "102--123",
  month =        mar,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68H05 (03B99)",
  MRnumber =     "83f:68112",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/nonmono.bib; Compendex database;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-1/p102-dahl/p102-dahl.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-1/p102-dahl/",
  abstract =     "The use of logic as a single tool for formalizing and
                 implementing different aspects of database systems in a
                 uniform manner is discussed. The discussion focuses on
                 relational databases with deductive capabilities and
                 very high-level querying and defining features. The
                 computational interpretation of logic is briefly
                 reviewed, and then several pros and cons concerning the
                 description of data, programs, queries, and language
                 parser in terms of logic programs are examined. The
                 inadequacies are discussed, and it is shown that they
                 can be overcome by the introduction of convenient
                 extensions into logic programming. Finally, an
                 experimental database query system with a natural
                 language front end, implemented in PROLOG, is presented
                 as an illustration of these concepts. A description of
                 the latter from the user's point of view and a sample
                 consultation session in Spanish are included.",
  acknowledgement = ack-nhfb,
  annote =       "The use of logic as a single tool for relational
                 databases with deductive capabilities and very
                 high-level querying and defining features. Inadequacies
                 are discussed, and overcome by extensions into logic
                 programming. An experimental database query system with
                 a natural language front end, implemented in PROLOG, is
                 presented.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems, TODS relational database; relational
                 database",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Mathematical Logic
                 (F.4.1): {\bf Logic and constraint programming};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Relational databases}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf
                 Prolog}",
}

@Article{Addis:1982:RBL,
  author =       "T. R. Addis",
  title =        "A Relation-Based Language Interpreter for a Content
                 Addressable File Store",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "125--163",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-2/p125-addis/p125-addis.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-2/p125-addis/",
  abstract =     "The combination of the Content Addressable File Store
                 (CAFS \footnote{CAFS is a registered trademark of
                 International Computers Limited}) and an extension of
                 relational analysis is described. This combination
                 allows a simple and compact implementation of a
                 database query and update language (FIDL). The language
                 has one of the important properties of a ``natural''
                 language interface by using a ``world model'' derived
                 from the relational analysis. The interpreter (FLIN)
                 takes full advantage of the CAFS by employing a unique
                 database storage technique which results in a fast
                 response to both queries and updates.",
  acknowledgement = ack-nhfb,
  annote =       "ICL CAFS is used.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer operating systems --- program Interpreters,
                 hardware support database machine CAFS TODS; content
                 addressing; data base systems",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Database Machines
                 (H.2.6)",
}

@Article{Buneman:1982:ITD,
  author =       "Peter Buneman and Robert E. Frankel and Rishiyur
                 Nikhil",
  title =        "An Implementation Technique for Database Query
                 Languages",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "164--186",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/Functional.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-2/p164-buneman/p164-buneman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-2/p164-buneman/",
  abstract =     "Structured query languages, such as those available
                 for relational databases, are becoming increasingly
                 desirable for all database management systems. Such
                 languages are applicative: there is no need for an
                 assignment or update statement. A new technique is
                 described that allows for the implementation of
                 applicative query languages against most commonly used
                 database systems. The technique involves ``lazy''
                 evaluation and has a number of advantages over existing
                 methods: it allows queries and functions of arbitrary
                 complexity to be constructed; it reduces the use of
                 secondary storage; it provides a simple control
                 structure through which interfaces to other programs
                 may be constructed; and the implementation, including
                 the database interface, is quite compact. Although the
                 technique is presented for a specific functional
                 programming system and for a CODASYL DBMS, it is
                 general and may be used for other query languages and
                 database systems.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "applicative programming; coroutines; database
                 interfaces; functional, data base systems; lazy
                 evaluation; query languages; TODS functional FQL
                 applicative programming lazy evaluation",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Software --- Programming Languages ---
                 Language Classifications (D.3.2): {\bf Applicative
                 (functional) languages}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf Query
                 languages}",
}

@Article{Obermarck:1982:DDD,
  author =       "Ron Obermarck",
  title =        "Distributed Deadlock Detection Algorithm",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "187--208",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/misc.1.bib; Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-2/p187-obermarck/p187-obermarck.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-2/p187-obermarck/",
  abstract =     "We propose an algorithm for detecting deadlocks among
                 transactions running concurrently in a distributed
                 processing network (i.e., a distributed database
                 system). The proposed algorithm is a distributed
                 deadlock detection algorithm. A proof of the
                 correctness of the distributed portion of the algorithm
                 is given, followed by an example of the algorithm in
                 operation. The performance characteristics of the
                 algorithm are also presented.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming; data base systems; deadlock
                 detection",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Computer Systems Organization ---
                 Computer-Communication Networks --- Distributed Systems
                 (C.2.4): {\bf Distributed databases}; Software ---
                 Operating Systems --- Process Management (D.4.1): {\bf
                 Deadlocks}; Software --- Operating Systems ---
                 Organization and Design (D.4.7): {\bf Distributed
                 systems}",
}

@Article{Garcia-Molina:1982:ROT,
  author =       "H{\'e}ctor Garc{\'\i}a-Molina and Gio Wiederhold",
  title =        "Read-Only Transactions in a Distributed Database",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "209--234",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-2/p209-garcia-molina/p209-garcia-molina.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-2/p209-garcia-molina/",
  abstract =     "A read-only transaction or query is a transaction
                 which does not modify any data. Read-only transactions
                 could be processed with general transaction processing
                 algorithms, but in many cases it is more efficient to
                 process read-only transactions with special algorithms
                 which take advantage of the knowledge that the
                 transaction only reads. This paper defines the various
                 consistency and currency requirements that read-only
                 transactions may have. The processing of the different
                 classes of read-only transactions in a distributed
                 database is discussed. The concept of {$R$} insularity
                 is introduced to characterize both the read-only and
                 update algorithms. Several simple update and read-only
                 transaction processing algorithms are presented to
                 illustrate how the query requirements and the update
                 algorithms affect the read-only transaction processing
                 algorithms.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; consistency; currency; data base
                 systems, TODS R insularity; query; R insularity;
                 read-only transaction; schedule; serializability;
                 transaction; transaction processing algorithm",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query processing};
                 Computer Systems Organization ---
                 Computer-Communication Networks --- Distributed Systems
                 (C.2.4): {\bf Distributed databases}",
}

@Article{Shneiderman:1982:AAR,
  author =       "Ben Shneiderman and Glenn Thomas",
  title =        "An Architecture for Automatic Relational Database
                 System Conversion",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "235--257",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/database.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-2/p235-shneiderman/p235-shneiderman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-2/p235-shneiderman/",
  abstract =     "Changes in requirements for database systems
                 necessitate schema restructuring, database translation,
                 and application or query program conversion. An
                 alternative to the lengthy manual revision process is
                 proposed by offering a set of 15 transformations keyed
                 to the relational model of data and the relational
                 algebra. Motivations, examples, and detailed
                 descriptions are provided.",
  acknowledgement = ack-nhfb,
  annote =       "Alterations to the logical structure of a DB may
                 necessitate changes at three levels: (1) stored
                 database, (2) schema definition, and (3) application
                 programs or queries. Each transformation is assessed on
                 three features: (1) information preservation (data are
                 not destroyed, only their logical format is altered);
                 (2) data dependence (a data dependent transformation is
                 one in which the stored DB must be checked to determine
                 whether it is consistent with the logical format of the
                 target system); and (3) program dependence (a program
                 dependent transformation is one in which the
                 application programs must be checked to determine
                 whether the transformation is permissible). At every
                 stage the DB is kept in fourth normal form. The 15
                 transformations are divided into five groups. The first
                 group includes simple alterations, such as changing the
                 name of an attribute or relation (CHANGE NAME), or
                 adding or deleting attributes or relations (ADD/DELETE
                 ATTRIBUTES, INTRODUCE/SEPARATE). The role played by
                 keys in the relational model is clearly critical, and
                 particular care must be taken when transformations
                 involving these keys are being carried out. The second
                 group of transformations concerns the effect of adding
                 attributes to or deleting attributes from keys
                 (PROMOTE/DEMOTE). The third and fourth sets of
                 transformations are provided for the combining and
                 dividing of relations. (COMPOSE/DECOMPOSE,
                 PARTITION/MERGE). The final group of transformations is
                 concerned with functional dependencies.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "automatic conversion; data base systems; database
                 systems; relational model; transformations",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Schema and subschema};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Data manipulation languages
                 (DML)}; Information Systems --- Database Management ---
                 Heterogeneous Databases (H.2.5): {\bf Program
                 translation**}",
}

@Article{Roussopoulos:1982:VIR,
  author =       "Nicholas Roussopoulos",
  title =        "View Indexing in Relational Databases",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "258--290",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-2/p258-roussopoulos/p258-roussopoulos.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-2/p258-roussopoulos/",
  abstract =     "The design and maintenance of a useful database system
                 require efficient optimization of the logical access
                 paths which demonstrate repetitive usage patterns.
                 Views (classes of queries given by a query model) are
                 an appropriate intermediate logical representation for
                 databases. Frequently accessed views of databases need
                 to be supported by indexing to enhance retrieval. This
                 paper investigates the problem of selecting an optimal
                 index set of views and describes an efficient algorithm
                 for this selection.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data base systems, views precomputation index
                 selection TODS index selection; index selection",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}",
}

@Article{Jacobs:1982:IRL,
  author =       "Barry E. Jacobs and Alan R. Aronson and Anthony C.
                 Klug",
  title =        "On Interpretations of Relational Languages and
                 Solutions to the Implied Constraint Problem",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "291--315",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-2/p291-jacobs/p291-jacobs.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-2/p291-jacobs/",
  abstract =     "The interconnection between conceptual and external
                 levels of a relational database is made precise in
                 terms of the notion of ``interpretation'' between
                 first-order languages. This is then used to obtain a
                 methodology for discovering constraints at the external
                 level that are ``implied'' by constraints at the
                 conceptual level and by conceptual-to-external
                 mappings. It is also seen that these concepts are
                 important in other database issues, namely, automatic
                 program conversion, database design, and compile-time
                 error checking of embedded database languages. Although
                 this study deals exclusively with the relational
                 approach, it also discusses how these ideas can be
                 extended to hierarchical and network databases.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "constraints; data base systems; program conversion;
                 relational database",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Schema and subschema}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Data manipulation languages (DML)}",
}

@Article{Chamberlin:1982:HFC,
  author =       "Donald D. Chamberlin",
  title =        "On ``Human Factors Comparison of a Procedural and a
                 Nonprocedural Query Language''",
  journal =      j-TODS,
  volume =       "7",
  number =       "2",
  pages =        "316--317",
  month =        jun,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 12:45:59 1996",
  bibsource =    "Database/Graefe.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "TODS technical correspondence",
}

@Article{Traiger:1982:TCD,
  author =       "Irving L. Traiger and Jim Gray and Cesare A. Galtieri
                 and Bruce G. Lindsay",
  title =        "Transactions and Consistency in Distributed Database
                 Systems",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "323--342",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p323-traiger/p323-traiger.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p323-traiger/",
  abstract =     "The concepts of transaction and of data consistency
                 are defined for a distributed system. The cases of
                 partitioned data, where fragments of a file are stored
                 at multiple nodes, and replicated data, where a file is
                 replicated at several nodes, are discussed. It is
                 argued that the distribution and replication of data
                 should be transparent to the programs which use the
                 data. That is, the programming interface should provide
                 location transparency, replica transparency,
                 concurrency transparency, and failure transparency.
                 Techniques for providing such transparencies are
                 abstracted and discussed.\par

                 By extending the notions of system schedule and system
                 clock to handle multiple nodes, it is shown that a
                 distributed system can be modeled as a single
                 sequential execution sequence. This model is then used
                 to discuss simple techniques for implementing the
                 various forms of transparency.",
  acknowledgement = ack-nhfb,
  annote =       "This paper is an easy-to-read introduction to required
                 transparency in distributed database systems. 4
                 transparencies are chosen and explained here, namely
                 location transparency, replication transparency,
                 concurrency transparency, and failure transparency. The
                 transaction model adopted by the paper is fully
                 synchronous and a two-phase protocol is used to implement
                 concurrency transparency. The paper proves that if all
                 transaction executions are two-phase, any legal
                 execution of the transactions by a distributed system
                 will be equivalent to some serial execution of the
                 transactions by a system consisting of a single node
                 under the assumption that updates are synchronous. The
                 paper introduces a special node-associated clock to prove
                 it. The paper also gives simple explanation about a
                 protocol to implement failure transparency using logs
                 and two-phase commit protocol.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; data partitioning; data
                 replication; recovery; TODS data replication, data
                 partitioning",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}",
}

@Article{Fagin:1982:SUR,
  author =       "Ronald Fagin and Alberto O. Mendelzon and Jeffrey D.
                 Ullman",
  title =        "A Simplified Universal Relation Assumption and its
                 Properties",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "343--360",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68H05 (68B15)",
  MRnumber =     "83k:68100",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/bibdb.bib; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p343-fagin/p343-fagin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p343-fagin/",
  abstract =     "One problem concerning the universal relation
                 assumption is the inability of known methods to obtain
                 a database scheme design in the general case, where the
                 real-world constraints are given by a set of
                 dependencies that includes embedded multivalued
                 dependencies. We propose a simpler method of describing
                 the real world, where constraints are given by
                 functional dependencies and a single join dependency.
                 The relationship between this method of defining the
                 real world and the classical methods is exposed. We
                 characterize in terms of hypergraphs those multivalued
                 dependencies that are the consequence of a given join
                 dependency. Also characterized in terms of hypergraphs
                 are those join dependencies that are equivalent to a
                 set of multivalued dependencies.",
  acknowledgement = ack-nhfb,
  annote =       "Constraints are functional dependencies and a single
                 join dependency.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "acyclic; database scheme; hypergraph; join dependency;
                 multivalued dependency; relational database",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1);
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Graph algorithms};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Trees}; Information Systems
                 --- Database Management --- Logical Design (H.2.1):
                 {\bf Normal forms}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Schema and
                 subschema}; Information Systems --- Information Storage
                 and Retrieval --- Information Search and Retrieval
                 (H.3.3): {\bf Query formulation}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Relational databases}",
}

@Article{Klug:1982:DVD,
  author =       "Anthony Klug and Rod Price",
  title =        "Determining {View} dependencies using tableaux",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "361--380",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68H05",
  MRnumber =     "83k:68103",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p361-klug/p361-klug.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p361-klug/",
  abstract =     "A relational database models some part of the real
                 world by a set of relations and a set of constraints.
                 The constraints model properties of the stored
                 information and must be maintained true at all times.
                 For views defined over physically stored (base)
                 relations, this is done by determining whether the view
                 constraints are logical consequences of base relation
                 constraints. A technique for determining such valid
                 view constraints is presented in this paper. A
                 generalization of the tableau chase is used. The idea
                 of the method is to generate a tableau for the
                 expression whose summary violates the test constraints
                 in a ``canonical'' way. The chase then tries to remove
                 this violation.\par

                 It is also shown how this method has applications to
                 schema design. Relations not in normal form or having
                 other deficiencies can be replaced by normal form
                 projections without losing the ability to represent all
                 constraint information.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "chase; dependencies; relational algebra; relational
                  model; tableaux; views TODS",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Schema and subschema}",
}

@Article{Dayal:1982:CTU,
  author =       "Umeshwar Dayal and Philip A. Bernstein",
  title =        "On the Correct Translation of Update Operations on
                 Relational Views",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "381--416",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68H05",
  MRnumber =     "83k:68099",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p381-dayal/p381-dayal.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p381-dayal/",
  abstract =     "Most relational database systems provide a facility
                 for supporting user views. Permitting this level of
                 abstraction has the danger, however, that update
                 requests issued by a user within the context of his
                 view may not translate correctly into equivalent
                 updates on the underlying database. The purpose of this
                 paper is to formalize the notion of update translation
                 and derive conditions under which translation
                 procedures will produce correct translations of view
                 updates.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "external schemata; relational databases; schema
                 mapping; update translation; user views",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Schema and subschema};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Data manipulation languages
                 (DML)}; Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Heterogeneous Databases (H.2.5): {\bf
                 Program translation**}; Computing Methodologies ---
                 Artificial Intelligence --- Automatic Programming
                 (I.2.2): {\bf Program transformation}",
}

@Article{Griffith:1982:TPR,
  author =       "Robert L. Griffith",
  title =        "Three Principles of Representation for Semantic
                 Networks",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "417--442",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p417-griffith/p417-griffith.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p417-griffith/",
  abstract =     "Semantic networks are so intuitive and easy to use
                 that they are often employed without much thought as to
                 the phenomenon of semantic nets themselves. Since they
                 are becoming more and more a tool of artificial
                 intelligence and now database technology, it is
                 appropriate to focus on the principles of semantic
                 nets. Such focus finds a harmonious and consistent base
                 which can increase the semantic quality and usefulness
                 of such nets. Three rules of representation are
                 presented which achieve greater conceptual simplicity
                 for users, simplifications in semantic net
                 implementations and maintenance, and greater
                 consistency across semantic net applications. These
                 rules, applied to elements of the net itself, reveal
                 how fundamental structures should be organized, and
                 show that the common labeled-edge semantic net can be
                 derived from a more primitive structure involving only
                 nodes and membership relationships (and special nodes
                 which represent names). Also, the correlation between
                 binary and $n$-ary relations is presented.",
  acknowledgement = ack-nhfb,
  annote =       "Semantic networks are employed without much thought.
                  They are becoming a tool of artificial intelligence
                  and database technology, principles of semantic nets.
                 Three rules of representation are presented. The common
                 labeled-edge semantic net can be derived from a more
                 primitive structure involving only nodes and membership
                 relationships.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  subject =      "Data --- Data Structures (E.1): {\bf Graphs and
                 networks}; Computing Methodologies --- Artificial
                 Intelligence --- Knowledge Representation Formalisms
                 and Methods (I.2.4): {\bf Semantic networks}",
}

@Article{Kim:1982:OSL,
  author =       "Won Kim",
  title =        "On Optimizing an {SQL-like} Nested Query",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "443--469",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p443-kim/p443-kim.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p443-kim/",
  abstract =     "SQL is a high-level nonprocedural data language which
                 has received wide recognition in relational databases.
                 One of the most interesting features of SQL is the
                 nesting of query blocks to an arbitrary depth. An
                 SQL-like query nested to an arbitrary depth is shown to
                 be composed of five basic types of nesting. Four of
                 them have not been well understood and more work needs
                 to be done to improve their execution efficiency.
                 Algorithms are developed that transform queries
                 involving these basic types of nesting into
                 semantically equivalent queries that are amenable to
                 efficient processing by existing query-processing
                 subsystems. These algorithms are then combined into a
                 coherent strategy for processing a general nested query
                 of arbitrary complexity.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "aggregate function; divide; join; nested query;
                 predicate; relational database; SQL queries TODS",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}",
}

@Article{Wong:1982:SAI,
  author =       "Eugene Wong",
  title =        "A Statistical Approach to Incomplete Information in
                 Database Systems",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "470--488",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68H05",
  MRnumber =     "83k:68108",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/bibdb.bib; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p470-wong/p470-wong.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p470-wong/",
  abstract =     "There are numerous situations in which a database
                 cannot provide a precise answer to some of the
                 questions that are posed. Sources of imprecision vary
                 and include examples such as recording errors,
                 incompatible scaling, and obsolete data. In many such
                 situations, considerable prior information concerning
                 the imprecision exists and can be exploited to provide
                 valuable information for queries to which no exact
                 answer can be given. The objective of this paper is to
                 provide a framework for doing so.",
  acknowledgement = ack-nhfb,
  annote =       "Sources of imprecision include recording errors,
                 incompatible scaling, and obsolete data. In many
                 situations considerable prior information concerning
                 the imprecision exists and can be exploited. This paper
                 provides a framework. Null values.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "incomplete information; missing values; null values;
                 TODS null values",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Schema and subschema}",
}

@Article{Zaniolo:1982:NNF,
  author =       "Carlo Zaniolo",
  title =        "A New Normal Form for the Design of Relational
                 Database Schemata",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "489--499",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68H05 (68B15)",
  MRnumber =     "83k:68109",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p489-zaniolo/p489-zaniolo.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p489-zaniolo/",
  abstract =     "This paper addresses the problem of database schema
                 design in the framework of the relational data model
                 and functional dependencies. It suggests that both
                 Third Normal Form (3NF) and Boyce-Codd Normal Form
                 (BCNF) supply an inadequate basis for relational schema
                 design. The main problem with 3NF is that it is too
                 forgiving and does not enforce the separation principle
                 as strictly as it should. On the other hand, BCNF is
                 incompatible with the principle of representation and
                 prone to computational complexity. Thus a new normal
                 form, which lies between these two and captures the
                 salient qualities of both is proposed. The new normal
                 form is stricter than 3NF, but it is still compatible
                 with the representation principle. First a simpler
                 definition of 3NF is derived, and the analogy of this
                 new definition to the definition of BCNF is noted. This
                 analogy is used to derive the new normal form. Finally,
                 it is proved that Bernstein's algorithm for schema
                 design synthesizes schemata that are already in the new
                 normal form.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database schema; functional dependencies; relational
                 model",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Normal forms}",
}

@Article{Lam:1982:CSA,
  author =       "K. Lam and C. T. Yu",
  title =        "A Clustered Search Algorithm Incorporating Arbitrary
                 Term Dependencies",
  journal =      j-TODS,
  volume =       "7",
  number =       "3",
  pages =        "500--508",
  month =        sep,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68H05",
  MRnumber =     "83k:68104",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/bibdb.bib; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-3/p500-lam/p500-lam.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-3/p500-lam/",
  abstract =     "The documents in a database are organized into
                 clusters, where each cluster contains similar documents
                 and a representative of these documents. A user query
                 is compared with all the representatives of the
                 clusters, and on the basis of such comparisons, those
                 clusters having many {\em close neighbors\/} with
                 respect to the query are selected for searching. This
                 paper presents an estimation of the number of close
                 neighbors in a cluster in relation to the given query.
                 The estimation takes into consideration the
                 dependencies between terms. It is demonstrated by
                 experiments that the estimate is accurate and the time
                 to generate the estimate is small.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "Bahadur-Lazarsfeld expansion; clustered search; CTYU
                 TODS; generating polynomial; term dependencies",
  subject =      "Theory of Computation --- Analysis of Algorithms and
                 Problem Complexity --- General (F.2.0); Mathematics of
                 Computing --- Discrete Mathematics --- Combinatorics
                 (G.2.1): {\bf Combinatorial algorithms}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Storage (H.3.2): {\bf File organization};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Clustering}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Retrieval models}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Search
                 process}",
}

@Article{Batory:1982:UMP,
  author =       "D. S. Batory and C. C. Gotlieb",
  title =        "A Unifying Model of Physical Databases",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "509--539",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p509-batory/p509-batory.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p509-batory/",
  abstract =     "A unifying model for the study of database performance
                 is proposed. Applications of the model are shown to
                 relate and extend important work concerning batched
                 searching, transposed files, index selection, dynamic
                 hash-based files, generalized access path structures,
                 differential files, network databases, and multifile
                 query processing.",
  acknowledgement = ack-nhfb,
  annote =       "See also \cite{Piwowarski:1985:CBS}.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems, TODS decomposition; decomposition;
                 linksets; simple files; unifying model",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2)",
}

@Article{Aghili:1982:PGD,
  author =       "Houtan Aghili and Dennis G. Severance",
  title =        "Practical Guide to the Design of Differential Files
                 for Recovery of On-Line Databases",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "540--565",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p540-aghili/p540-aghili.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p540-aghili/",
  abstract =     "The concept of a differential file has previously been
                 proposed as an efficient means of collecting database
                 updates for on-line systems. This paper studies the
                 problem of database backup and recovery for such
                 systems, and presents an analytic model of their
                 operation. Five key design decisions are identified and
                 an optimization procedure for each is developed. A
                 design algorithm that quickly provides parameters for a
                 near-optimal differential file architecture is
                 provided.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "backup and recovery; data processing; database
                 maintenance; database systems; differential files;
                 hashing functions; numerical methods; optimization;
                 reorganization",
  subject =      "Data --- Data Storage Representations (E.2);
                 Mathematics of Computing --- Numerical Analysis (G.1);
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2); Information Systems ---
                 Database Management --- Database Administration
                 (H.2.7)",
}

@Article{Larson:1982:PAL,
  author =       "Per-{\AA}ke Larson",
  title =        "Performance Analysis of Linear Hashing with Partial
                 Expansions",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "566--587",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p566-larson/p566-larson.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p566-larson/",
  abstract =     "Linear hashing with partial expansions is a new file
                 organization primarily intended for files which grow
                 and shrink dynamically. This paper presents a
                 mathematical analysis of the expected performance of
                 the new scheme. The following performance measures are
                 considered: length of successful and unsuccessful
                 searches, accesses required to insert or delete a
                 record, and the size of the overflow area. The
                 performance is cyclical. For all performance measures,
                 the necessary formulas are derived for computing the
                 expected performance at any point of a cycle and the
                 average over a cycle. Furthermore, the expected worst
                 case in connection with searching is analyzed. The
                 overall performance depends on several file parameters.
                 The numerical results show that for many realistic
                 parameter combinations the performance is expected to
                 be extremely good. Even the longest search is expected
                 to be of quite reasonable length.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "dynamic hashing schemes; extendible hashing; hashing;
                  linear hashing; TODS dynamic hashing, extendible
                  hashing, data processing",
  subject =      "Theory of Computation --- Analysis of Algorithms and
                 Problem Complexity --- Nonnumerical Algorithms and
                 Problems (F.2.2): {\bf Sorting and searching};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Babb:1982:JNF,
  author =       "E. Babb",
  title =        "Joined Normal Form: a Storage Encoding for Relational
                 Databases",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "588--614",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p588-babb/p588-babb.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p588-babb/",
  abstract =     "A new on-line query language and storage structure for
                 a database machine is presented. By including a
                 mathematical model in the interpreter the query
                 language has been substantially simplified so that no
                 reference to relation names is necessary. By storing
                 the model as a single joined normal form (JNF) file, it
                 has been possible to exploit the powerful search
                 capability of the Content Addressable File Store (CAFS;
                 CAFS is a registered trademark of International
                 Computers Limited) database machine.",
  acknowledgement = ack-nhfb,
  annote =       "prejoining for CAFS.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "CAFS; content addressing hardware; database systems;
                 functional dependencies; implication network; joined
                 normal form; joins; mathematical model; network;
                 queries; relational database; storage encoding tags;
                 storage encoding, TODS CAFS, third normal form; third
                 normal form; updates",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Query languages}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Search process}",
}

@Article{Heyman:1982:MMD,
  author =       "Daniel P. Heyman",
  title =        "Mathematical Models of Database Degradation",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "615--631",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p615-heyman/p615-heyman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p615-heyman/",
  abstract =     "As data are updated, the initial physical structure of
                 a database is changed and retrieval of specific pieces
                 of data becomes more time consuming. This phenomenon is
                 called database degradation. In this paper two models
                 of database degradation are described. Each model
                 refers to a different aspect of the problem.\par

                 It is assumed that transactions are statistically
                 independent and either add, delete, or update data. The
                 first model examines the time during which a block of
                 data is filling up. The second model examines the
                 overflows from a block of data, which essentially
                 describes the buildup of disorganization. Analytical
                 results are obtained for both models. In addition,
                 several numerical examples are presented which show
                 that the mean number of overflows grows approximately
                 linearly with time. This approximation is used to
                 devise a simple formula for the optimal time to
                 reorganize a stochastically growing database.",
  acknowledgement = ack-nhfb,
  classification = "723; 921",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data overflows; database degradation, TODS data
                 overflows; database systems; file organization;
                 mathematical models",
  subject =      "Computer Systems Organization --- Performance of
                 Systems (C.4): {\bf Modeling techniques}; Information
                 Systems --- Database Management --- Database
                 Administration (H.2.7): {\bf Logging and recovery}",
}

@Article{Korth:1982:DFU,
  author =       "Henry F. Korth",
  title =        "Deadlock Freedom Using Edge Locks",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "632--652",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p632-korth/p632-korth.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p632-korth/",
  abstract =     "We define a series of locking protocols for database
                 systems that all have three main features: freedom from
                 deadlock, multiple granularity, and support for general
                 collections of locking primitives. A rooted directed
                 acyclic graph is used to represent multiple
                 granularities, as in System R. Deadlock freedom is
                 guaranteed by extending the System R protocol to
                 require locks on edges of the graph in addition to the
                 locks required on nodes.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; database systems; locking;
                 serializability",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing}",
}

@Article{Goodman:1982:TQS,
  author =       "Nathan Goodman and Oded Shmueli",
  title =        "Tree Queries: a Simple Class of Relational Queries",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "653--677",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p653-goodman/p653-goodman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p653-goodman/",
  abstract =     "One can partition the class of relational database
                 schemas into tree schemas and cyclic schemas. (These
                 are called acyclic hypergraphs and cyclic hypergraphs
                 elsewhere in the literature.) This partition has
                 interesting implications in query processing,
                 dependency theory, and graph theory.\par

                 The tree/cyclic partitioning of database schemas
                 originated with a similar partition of equijoin
                 queries. Given an arbitrary equijoin query one can
                 obtain an equivalent query that calculates the natural
                 join of all relations in (an efficiently) derived
                 database; such a query is called a natural join (NJ)
                 query. If the derived database is a tree schema the
                 original query is said to be a tree query, and
                 otherwise a cyclic query.\par

                 In this paper we analyze query processing consequences
                 of the tree/cyclic partitioning. We are able to argue,
                 qualitatively, that queries which imply a tree schema
                 are easier to process than those implying a cyclic
                 schema. Our results also extend the study of the
                 semijoin operator.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "acyclic schemes; cyclic schemas; database systems;
                 join; semijoin; tree queries; tree schemas",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}",
}

@Article{Kerschberg:1982:QOS,
  author =       "Larry Kerschberg and Peter D. Ting and S. Bing Yao",
  title =        "Query Optimization in Star Computer Networks",
  journal =      j-TODS,
  volume =       "7",
  number =       "4",
  pages =        "678--711",
  month =        dec,
  year =         "1982",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1982-7-4/p678-kerschberg/p678-kerschberg.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1982-7-4/p678-kerschberg/",
  abstract =     "Query processing is investigated for relational
                 databases distributed over several computers organized
                 in a star network. Minimal response-time processing
                 strategies are presented for queries involving the
                 select, project, and join commands. These strategies
                 depend on system parameters such as communication costs
                 and different machine processing speeds; database
                 parameters such as relation cardinality and file size;
                 and query parameters such as estimates of the size and
                 number of tuples in the result relation. The optimal
                 strategies specify relation preparation processes, the
                 shipping strategy, serial or parallel processing, and,
                 where applicable, the site of join filtering and
                 merging. Strategies for optimizing select and join
                 queries have been implemented and tested.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer networks; database systems; query
                 optimization; relational database system; star computer
                 network",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Distributed Systems
                 (C.2.4): {\bf Distributed applications}; Computer
                 Systems Organization --- Computer-Communication
                 Networks --- Distributed Systems (C.2.4): {\bf
                 Distributed databases}; Computer Systems Organization
                 --- Performance of Systems (C.4): {\bf Design studies};
                 Computer Systems Organization --- Performance of
                 Systems (C.4): {\bf Modeling techniques}; Software ---
                 Operating Systems --- File Systems Management (D.4.3):
                 {\bf Distributed file systems}; Software --- Operating
                 Systems --- Organization and Design (D.4.7): {\bf
                 Distributed systems}; Information Systems --- Database
                 Management --- Physical Design (H.2.2): {\bf Access
                 methods}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Distributed databases}",
}

@Article{Maier:1983:MOS,
  author =       "David Maier and Jeffrey D. Ullman",
  title =        "Maximal Objects and the Semantics of Universal
                 Relation Databases",
  journal =      j-TODS,
  volume =       "8",
  number =       "1",
  pages =        "1--14",
  month =        mar,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  note =         "Also published in/as: SUNY, Stony Brook, CS, TR
                 80/016, 1980.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-1/p1-maier/p1-maier.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-1/p1-maier/",
  abstract =     "The universal relation concept is intended to provide
                 the database user with a simplified model in which he
                 can compose queries without regard to the underlying
                 structure of the relations in the database. Frequently,
                 the lossless join criterion provides the query
                 interpreter with the clue needed to interpret the query
                 as the user intended. However, some examples exist
                 where interpretation by the lossless-join rule runs
                 contrary to our intuition. To handle some of these
                 cases, we propose a concept called {\em maximal
                 objects}, which modifies the universal relation concept
                 in exactly those situations where it appears to go awry
                 --- when the underlying relational structure has
                 ``cycles.'' We offer examples of how the maximal object
                 concept provides intuitively correct interpretations.
                 We also consider how one might construct maximal
                 objects mechanically from purely syntactic structural
                 information --- the relation schemes and functional
                 dependencies --- about the database.",
  acknowledgement = ack-nhfb,
  annote =       "A universal relation is represented by a hypergraph.
                 If the hypergraph is cyclic, some queries can be
                 evaluated in different ways; restricting navigation to
                 few acyclic components (maximal objects) gives
                 intuitively correct answers.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "acyclic hypergraph; database systems; relational
                 database; universal relation",
  subject =      "Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Path and circuit problems};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Data description languages (DDL)}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Data manipulation languages (DML)}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf Query
                 processing}",
}

@Article{Haskin:1983:OCH,
  author =       "Roger L. Haskin and Lee A. Hollaar",
  title =        "Operational Characteristics of a Hardware-Based
                 Pattern Matcher",
  journal =      j-TODS,
  volume =       "8",
  number =       "1",
  pages =        "15--40",
  month =        mar,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-1/p15-haskin/p15-haskin.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-1/p15-haskin/",
  abstract =     "The design and operation of a new class of
                 hardware-based pattern matchers, such as would be used
                 in a backended database processor in a full-text or
                 other retrieval system, is presented. This recognizer
                 is based on a unique implementation technique for
                 finite state automata consisting of partitioning the
                 state table among a number of simple digital machines.
                 It avoids the problems generally associated with
                 implementing finite state machines, such as large state
                 table memories, complex control mechanisms, and state
                 encodings. Because it consists primarily of memory,
                 with its high regularity and density, needs only
                 limited static interconnections, and operates at a
                 relatively low speed, it can be easily constructed
                 using integrated circuit techniques.\par

                 After a brief discussion of other pattern-matching
                 hardware, the structure and operation of the
                 partitioned finite state automaton is given, along with
                 a simplified discussion of how the state tables are
                 partitioned. The expected performance of the resulting
                 system and the state table partitioning programs is
                 then discussed.",
  acknowledgement = ack-nhfb,
  classification = "723; 901",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "backend processors; computer system architecture;
                 database systems; finite state automata; full text
                 retrieval systems; information science; text
                 searching",
  subject =      "Hardware --- Logic Design --- Design Styles (B.6.1):
                 {\bf Cellular arrays and automata}; Hardware ---
                 Integrated Circuits --- Types and Design Styles
                 (B.7.1): {\bf Algorithms implemented in hardware};
                 Information Systems --- Database Management ---
                 Database Machines (H.2.6); Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Search process}",
}

@Article{Sicherman:1983:AQR,
  author =       "George L. Sicherman and Wiebren {De Jonge} and Reind
                 P. {Van De Riet}",
  title =        "Answering Queries without Revealing Secrets",
  journal =      j-TODS,
  volume =       "8",
  number =       "1",
  pages =        "41--59",
  month =        mar,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  note =         "Also published in/as: reprinted in deJonge thesis,
                 Jun. 1985.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-1/p41-sicherman/p41-sicherman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-1/p41-sicherman/",
  abstract =     "Question-answering systems must often keep certain
                 information secret. This can be accomplished, for
                 example, by sometimes refusing to answer a query. Here
                 the danger of revealing a secret by refusing to answer
                 a query is investigated. First, several criteria that
                 can be used to decide whether or not to answer a query
                 are developed. Then it is shown which of these criteria are
                 safe if the questioner knows nothing at all about what
                 is kept secret. Furthermore, it is proved that one of
                 these criteria is safe even if the user of the system
                 knows which information is to be kept secret.",
  acknowledgement = ack-nhfb,
  acmcrnumber =  "8404-296",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems, statistical security TODS; keeping
                 secrets; refusal to answer; strategy",
  subject =      "Information Systems --- Models and Principles ---
                 Systems and Information Theory (H.1.1): {\bf Value of
                 information}; Information Systems --- Models and
                 Principles --- User/Machine Systems (H.1.2);
                 Information Systems --- Information Storage and
                 Retrieval --- Systems and Software (H.3.4): {\bf
                 Current awareness systems (selective dissemination of
                 information--SDI)**}; Information Systems ---
                 Information Storage and Retrieval --- Systems and
                 Software (H.3.4): {\bf Question-answering (fact
                 retrieval) systems**}; Computing Methodologies ---
                 Artificial Intelligence --- Deduction and Theorem
                 Proving (I.2.3): {\bf Answer/reason extraction}",
}

@Article{deJonge:1983:CSD,
  author =       "Wiebren de Jonge",
  title =        "Compromising Statistical Databases Responding to
                 Queries About Means",
  journal =      j-TODS,
  volume =       "8",
  number =       "1",
  pages =        "60--80",
  month =        mar,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  note =         "Also published in/as: reprinted in Jun. 1985 thesis",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-1/p60-de_jonge/p60-de_jonge.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-1/p60-de_jonge/",
  abstract =     "This paper describes how to compromise a statistical
                 database which only answers queries about arithmetic
                 means for query sets whose cardinality falls in the
                 range $ [k, N - k] $, for some $ k > 0 $,
                 where $ N \geq 2 k $ is the
                 number of records in the database. The compromise is
                 shown to be easy and to require only a little
                 preknowledge; knowing the cardinality of just one
                 nonempty query set is usually sufficient.\par

                 This means that not only count and sum queries, but
                 also queries for arithmetic means can be extremely
                 dangerous for the security of a statistical database,
                 and that this threat must be taken into account
                 explicitly by protective measures. This seems quite
                 important from a practical standpoint: while arithmetic
                 means were known for some time to be not altogether
                 harmless, the (perhaps surprising) extent of the threat
                 is now shown.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compromise; database security; database systems;
                 security TODS; statistical databases",
  subject =      "Information Systems --- Models and Principles ---
                 Systems and Information Theory (H.1.1): {\bf Value of
                 information}; Information Systems --- Models and
                 Principles --- User/Machine Systems (H.1.2);
                 Information Systems --- Information Storage and
                 Retrieval --- Systems and Software (H.3.4): {\bf
                 Question-answering (fact retrieval) systems**}",
}

@Article{Graham:1983:FD,
  author =       "Marc H. Graham",
  title =        "Functions in Databases",
  journal =      j-TODS,
  volume =       "8",
  number =       "1",
  pages =        "81--109",
  month =        mar,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "85a:68036",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-1/p81-graham/p81-graham.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-1/p81-graham/",
  abstract =     "We discuss the objectives of including functional
                 dependencies in the definition of a relational
                 database. We find two distinct objectives. The
                 appearance of a dependency in the definition of a
                 database indicates that the states of the database are
                 to encode a function. A method based on the chase of
                 calculating the function encoded by a particular state
                 is given and compared to methods utilizing derivations
                 of the dependency. A test for deciding whether the
                 states of a schema may encode a nonempty function is
                 presented as is a characterization of the class of
                 schemas which are capable of encoding nonempty
                 functions for all the dependencies in the definition.
                 This class is the class of dependency preserving
                 schemas as defined by Beeri et al. and is strictly
                 larger than the class presented by Bernstein.\par

                 The second objective of including a functional
                 dependency in the definition of a database is that the
                 dependency be capable of constraining the states of the
                 database; that is, capable of uncovering input errors
                 made by the users. We show that this capability is
                 weaker than the first objective; thus, even
                 dependencies whose functions are everywhere empty may
                 still act as constraints. Bounds on the requirements
                 for a dependency to act as a constraint are derived.
                 \par

                 These results are founded on the notion of a weak
                 instance for a database state, which replaces the
                 universal relation instance assumption and is both
                 intuitively and computationally more nearly
                 acceptable.",
  acknowledgement = ack-nhfb,
  annote =       "Method based on the chase of calculating the function
                 is given; the dependency should constrain the states of
                 the database; many algorithms.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "functional dependencies; tableaux; TODS functional
                 dependencies, tableaux, database systems",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Normal forms}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Schema and subschema}",
}

@Article{Katz:1983:RCG,
  author =       "R. H. Katz and E. Wong",
  title =        "Resolving Conflicts in Global Storage Design Through
                 Replication",
  journal =      j-TODS,
  volume =       "8",
  number =       "1",
  pages =        "110--135",
  month =        mar,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-1/p110-katz/p110-katz.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-1/p110-katz/",
  abstract =     "We present a conceptual framework in which a
                 database's intra- and interrecord set access
                 requirements are specified as a constrained assignment
                 of abstract characteristics (``evaluated,''
                 ``indexed,'' ``clustered,'' ``well-placed'') to logical
                 access paths. We derive a physical schema by choosing
                 an available storage structure that most closely
                 provides the desired access characteristics. We use
                 explicit replication of schema objects to reduce the
                 access cost along certain paths, and analyze the
                 trade-offs between increased update overhead and
                 improved retrieval access. Finally, we give an
                 algorithm to select storage structures for a CODASYL 78
                 DBTG schema, given its access requirements
                 specification.",
  acknowledgement = ack-nhfb,
  annote =       "Access path data model deduced from the
                 Entity-Relationship Model. Each function is augmented
                 with access characteristics, evaluated, indexed,
                 clustered, and well-placed.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access path selection; database systems, TODS
                 functional data model; functional data model; storage
                 structure choice",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Physical Design
                 (H.2.2): {\bf Access methods}",
}

@Article{Lomet:1983:BIE,
  author =       "David B. Lomet",
  title =        "Bounded Index Exponential Hashing",
  journal =      j-TODS,
  volume =       "8",
  number =       "1",
  pages =        "136--165",
  month =        mar,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-1/p136-lomet/p136-lomet.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-1/p136-lomet/",
  abstract =     "Bounded index exponential hashing, a new form of
                 extendible hashing, is described. It has the important
                 advantages over most of the other extendible hashing
                 variants of both (i) providing random access to any
                 record of a file in close to one disk access and (ii)
                 having performance which does not vary with file size.
                 It is straightforward to implement and demands only a
                 fixed and specifiable amount of main storage to achieve
                 this performance. Its underlying physical disk storage
                 is readily managed and record overflow is handled so as
                 to insure that unsuccessful searches never take more
                 than two accesses. The method's ability to access data
                 in close to a single disk access makes it possible to
                 organize a database, in which files have a primary key
                 and multiple secondary keys, such that the result is a
                 significant performance advantage over existing
                 organizations.",
  acknowledgement = ack-nhfb,
  classification = "722",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing, TODS extendible hashing; extendible
                 hashing; tree index methods",
  subject =      "Software --- Operating Systems --- File Systems
                 Management (D.4.3): {\bf File organization}; Data ---
                 Data Storage Representations (E.2): {\bf Hash-table
                 representations}; Information Systems --- Information
                 Storage and Retrieval --- Information Storage (H.3.2):
                 {\bf File organization}",
}

@Article{Stonebraker:1983:PER,
  author =       "Michael Stonebraker and John Woodfill and Jeff
                 Ranstrom and Marguerite Murphy and Marc Meyer and Eric
                 Allman",
  title =        "Performance Enhancements to a Relational Database
                 System",
  journal =      j-TODS,
  volume =       "8",
  number =       "2",
  pages =        "167--185",
  month =        jun,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-2/p167-stonebraker/p167-stonebraker.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-2/p167-stonebraker/",
  abstract =     "In this paper we examine four performance enhancements
                 to a database management system: dynamic compilation,
                 microcoded routines, a special-purpose file system, and
                 a special-purpose operating system. All were examined
                 in the context of the INGRES database management
                 system. Benchmark timings that are included suggest the
                 attractiveness of dynamic compilation and a
                 special-purpose file system. Microcode and a
                 special-purpose operating system are analyzed and
                 appear to be of more limited utility in the INGRES
                 context.",
  acknowledgement = ack-nhfb,
  annote =       "Estimates are given for compilation, micro-coding, a
                 file system which supports locality, and a specialized
                 operating system for INGRES.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "compiled query languages; database performance;
                 database systems, TODS dynamic compilation microcode
                 special purpose file operating system; file systems for
                 databases; microcode",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0); Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query processing};
                 Information Systems --- Database Management ---
                 Database Machines (H.2.6)",
}

@Article{Garcia-Molina:1983:USK,
  author =       "H{\'e}ctor Garc{\'\i}a-Molina",
  title =        "Using Semantic Knowledge for Transaction Processing in
                 a Distributed Database",
  journal =      j-TODS,
  volume =       "8",
  number =       "2",
  pages =        "186--213",
  month =        jun,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-2/p186-garcia-molina/p186-garcia-molina.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-2/p186-garcia-molina/",
  abstract =     "This paper investigates how the semantic knowledge of
                 an application can be used in a distributed database to
                 process transactions efficiently and to avoid some of
                 the delays associated with failures. The main idea is
                 to allow nonserializable schedules which preserve
                 consistency and which are acceptable to the system
                 users. To produce such schedules, the transaction
                 processing mechanism receives semantic information from
                 the users in the form of transaction semantic types, a
                 division of transactions into steps, compatibility
                 sets, and countersteps. Using these notions, we propose
                 a mechanism which allows users to exploit their
                 semantic knowledge in an organized fashion. The
                 strengths and weaknesses of this approach are
                 discussed.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; consistency; database systems;
                 locking; schedule; semantic knowledge;
                 serializability",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Distributed Systems
                 (C.2.4): {\bf Distributed databases}; Information
                 Systems --- Database Management --- Systems (H.2.4);
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}",
}

@Article{Clifford:1983:FST,
  author =       "James Clifford and David S. Warren",
  title =        "Formal Semantics for Time in Databases",
  journal =      j-TODS,
  volume =       "8",
  number =       "2",
  pages =        "214--254",
  month =        jun,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/ai.misc.bib; Compendex database;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-2/p214-clifford/p214-clifford.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-2/p214-clifford/",
  abstract =     "The concept of a historical database is introduced as
                 a tool for modeling the dynamic nature of some part of
                 the real world. Just as first-order logic has been
                 shown to be a useful formalism for expressing and
                 understanding the underlying semantics of the
                 relational database model, intensional logic is
                 presented as an analogous formalism for expressing and
                 understanding the temporal semantics involved in a
                 historical database. The various components of the
                 relational model, as extended to include historical
                 relations, are discussed in terms of the model theory
                  for the logic IL$_s$, a variation of the logic IL
                 formulated by Richard Montague. The modal concepts of
                 intensional and extensional data constraints and
                 queries are introduced and contrasted. Finally, the
                 potential application of these ideas to the problem of
                 natural language database querying is discussed.",
  acknowledgement = ack-nhfb,
  annote =       "all timeslices are represented. No inference needed.
                 Storage could be huge. Uses term historical db versus
                 temporal db. Intensional Montague logic. Two
                 timestamps: `state' and `exist'",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems; entity-relationship model;
                 historical databases; intensional logic; relational
                 database; temporal semantics",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}",
}

@Article{Bitton:1983:DRE,
  author =       "Dina Bitton and David J. DeWitt",
  title =        "Duplicate Record Elimination in Large Data Files",
  journal =      j-TODS,
  volume =       "8",
  number =       "2",
  pages =        "255--265",
  month =        jun,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-2/p255-bitton/p255-bitton.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-2/p255-bitton/",
  abstract =     "The issue of duplicate elimination for large data
                 files in which many occurrences of the same record may
                 appear is addressed. A comprehensive cost analysis of
                 the duplicate elimination operation is presented. This
                 analysis is based on a combinatorial model developed
                 for estimating the size of intermediate runs produced
                 by a modified merge-sort procedure. The performance of
                 this modified merge-sort procedure is demonstrated to
                 be significantly superior to the standard duplicate
                 elimination technique of sorting followed by a
                 sequential pass to locate duplicate records. The
                 results can also be used to provide critical input to a
                 query optimizer in a relational database system.",
  acknowledgement = ack-nhfb,
  annote =       "use a modified sort-merge.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems, early aggregation TODS; duplicate
                 elimination; projection operator; sorting",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}",
}

@Article{Sagiv:1983:CGC,
  author =       "Yehoshua Sagiv",
  title =        "A Characterization of Globally Consistent Databases
                 and Their Correct Access Paths",
  journal =      j-TODS,
  volume =       "8",
  number =       "2",
  pages =        "266--286",
  month =        jun,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/prolog.1.bib; Compendex database;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-2/p266-sagiv/p266-sagiv.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-2/p266-sagiv/",
  abstract =     "The representative instance is proposed as a
                 representation of the data stored in a database whose
                 relations are not the projections of a universal
                 instance. Database schemes are characterized for which
                 local consistency implies global consistency. (Local
                 consistency means that each relation satisfies its own
                 functional dependencies; global consistency means that
                 the representative instance satisfies all the
                 functional dependencies). A method of efficiently
                 computing projections of the representative instance is
                 given, provided that local consistency implies global
                 consistency. Throughout, it is assumed that a cover of
                 the functional dependencies is embodied in the database
                 scheme in the form of keys.",
  acknowledgement = ack-nhfb,
  annote =       "Inter-relational consistency based on FD's",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "chase; database systems, TODS chase, universal
                 relation scheme, extension join, relational algebra;
                 extension join; functional dependency; null value;
                 prolog; relational algebra; relational database;
                 representative instance; universal relation scheme",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1);
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Normal forms}; Information Systems
                 --- Database Management --- Logical Design (H.2.1):
                 {\bf Schema and subschema}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Query formulation}",
}

@Article{Ullman:1983:CTJ,
  author =       "Jeffrey D. Ullman",
  title =        "Corrigendum: The Theory of Joins in Relational
                 Databases",
  journal =      j-TODS,
  volume =       "8",
  number =       "2",
  pages =        "287--287",
  month =        jun,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibsource =    "Database/Graefe.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See \cite{Aho:1979:TJR}.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
}

@Article{Cardenas:1983:PRA,
  author =       "Alfonso F. Cardenas and Farid Alavian and Algirdas
                 Avizienis",
  title =        "Performance of Recovery Architectures in Parallel
                 Associative Database Processors",
  journal =      j-TODS,
  volume =       "8",
  number =       "3",
  pages =        "291--323",
  month =        sep,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-3/p291-cardenas/p291-cardenas.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-3/p291-cardenas/",
  abstract =     "The need for robust recovery facilities in modern
                 database management systems is quite well known.
                 Various authors have addressed recovery facilities and
                 specific techniques, but none have delved into the
                 problem of recovery in database machines. In this
                 paper, the types of undesirable events that occur in a
                 database environment are classified and the necessary
                 recovery information, with subsequent actions to
                 recover the correct state of the database, is
                 summarized. A model of the ``processor-per-track''
                 class of parallel associative database processor is
                 presented. Three different types of recovery mechanisms
                 that may be considered for parallel associative
                 database processors are identified. For each
                 architecture, both the workload imposed by the recovery
                 mechanisms on the execution of database operations
                 (i.e., retrieve, modify, delete, and insert) and the
                 workload involved in the recovery actions (i.e.,
                 rollback, restart, restore, and reconstruct) are
                 analyzed. The performance of the three architectures is
                 quantitatively compared. This comparison is made in
                 terms of the number of extra revolutions of the
                 database area required to process a transaction versus
                 the number of records affected by a transaction. A
                 variety of different design parameters of the database
                 processor, of the database, and of a mix of transaction
                 types (modify, insert, and delete) are considered. A
                 large number of combinations is selected and the
                 effects of the parameters on the extra processing time
                 are identified.",
  acknowledgement = ack-nhfb,
  annote =       "three methods of recovery in logic-per-track
                 processors are analyzed.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative database processors; database systems,
                 hardware support machine TODS",
  subject =      "Computer Systems Organization --- Performance of
                 Systems (C.4): {\bf Performance attributes};
                 Information Systems --- Database Management ---
                 Database Machines (H.2.6); Information Systems ---
                 Database Management --- Database Administration
                 (H.2.7): {\bf Logging and recovery}",
}

@Article{Bitton:1983:PAE,
  author =       "Dina Bitton and Haran Boral and David J. DeWitt and W.
                 Kevin Wilkinson",
  title =        "Parallel Algorithms for the Execution of Relational
                 Database Operations",
  journal =      j-TODS,
  volume =       "8",
  number =       "3",
  pages =        "324--353",
  month =        sep,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-3/p324-bitton/p324-bitton.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-3/p324-bitton/",
  abstract =     "This paper presents and analyzes algorithms for
                 parallel processing of relational database operations
                 in a general multiprocessor framework. To analyze
                 alternative algorithms, we introduce an analysis
                 methodology which incorporates I/O, CPU, and message
                 costs and which can be adjusted to fit different
                 multiprocessor architectures. Algorithms are presented
                 and analyzed for sorting, projection, and join
                 operations. While some of these algorithms have been
                 presented and analyzed previously, we have generalized
                 each in order to handle the case where the number of
                 pages is significantly larger than the number of
                 processors. In addition, we present and analyze
                 algorithms for the parallel execution of update and
                 aggregate operations.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "aggregate operations; database machines; database
                 systems; join operation; parallel processing;
                 projection operator; sorting",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Database Machines
                 (H.2.6)",
}

@Article{Eager:1983:ARD,
  author =       "Derek L. Eager and Kenneth C. Sevcik",
  title =        "Achieving Robustness in Distributed Database Systems",
  journal =      j-TODS,
  volume =       "8",
  number =       "3",
  pages =        "354--381",
  month =        sep,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib;
                 Distributed/fault.tolerant.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-3/p354-eager/p354-eager.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-3/p354-eager/",
  abstract =     "The problem of concurrency control in distributed
                 database systems in which site and communication link
                 failures may occur is considered. The possible range of
                 failures is not restricted; in particular, failures may
                 induce an arbitrary network partitioning. It is
                 desirable to attain a high ``level of robustness'' in
                 such a system; that is, these failures should have only
                 a small impact on system operation.\par

                 A level of robustness termed {\em maximal partial
                 operability\/} is identified. Under our models of
                 concurrency control and robustness, this robustness
                 level is the highest level attainable without
                 significantly degrading performance.\par

                 A basis for the implementation of maximal partial
                 operability is presented. To illustrate its use, it is
                 applied to a distributed locking concurrency control
                 method and to a method that utilizes timestamps. When
                 no failures are present, the robustness modifications
                 for these methods induce no significant additional
                 overhead.",
  acknowledgement = ack-nhfb,
  annote =       "Three phases: 1. read and write to a private
                 workspace, 2. indicate intention to commit, restart, or
                 abort, 3. if verified complete actual transaction.
                 Intention to update from phase 2 is withdrawn if abort
                 or restart is the end result of phase 2. Two versions
                 are presented, either the data are transmitted in phase
                 3, or, if handled as phase 2 of two-phase commit
                 protocol, the data are held in secure storage from
                 phase 2 to phase 3. In case of partitioning, voting
                 (ref. Gifford) is used, but to prevent broad lockouts,
                 those transactions which cannot update all copies must
                 post this failure at a quorum of nodes, so that
                 successor transactions can test that they do not
                 conflict, and do not enter phase 3. Dangling precommits
                 may be resolved by checking other nodes in the
                 partition. On re-establishing the net, acyclicness is
                 created by possibly restarting transactions which led
                 to a cycle in the combined schedule.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; database systems; network
                 partitioning; robustness; serializability",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2); Information Systems ---
                 Database Management --- Systems (H.2.4); Information
                 Systems --- Database Management --- Database
                 Administration (H.2.7)",
}

@Article{Trueblood:1983:MMM,
  author =       "Robert P. Trueblood and H. Rex Hartson and Johannes J.
                 Martin",
  title =        "{MULTISAFE} --- {A} Modular Multiprocessing Approach
                 to Secure Database Management",
  journal =      j-TODS,
  volume =       "8",
  number =       "3",
  pages =        "382--409",
  month =        sep,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-3/p382-trueblood/p382-trueblood.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-3/p382-trueblood/",
  abstract =     "This paper describes the configuration and intermodule
                 communication of a MULTImodule system for supporting
                 Secure Authorization with Full Enforcement (MULTISAFE)
                 for database management. A modular architecture is
                 described which provides secure, controlled access to
                 shared data in a multiuser environment, with low
                 performance penalties, even for complex protection
                 policies. The primary mechanisms are structured and
                 verifiable. The entire approach is immediately
                 extendible to distributed protection of distributed
                 data. The system includes a user and applications
                 module (UAM), a data storage and retrieval module
                 (SRM), and a protection and security module (PSM). The
                 control of intermodule communication is based on a data
                 abstraction approach, initially described in terms of
                 function invocations. An implementation within a formal
                 message system is then described. The discussion of
                 function invocations begins with the single terminal
                 case and extends to the multiterminal case. Some
                 physical implementation aspects are also discussed, and
                 some examples of message sequences are given.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "abstract data types; access control; back-end
                 database; database systems; intermodule communication;
                 secure database; TODS",
  subject =      "Computer Systems Organization --- Processor
                 Architectures --- Multiple Data Stream Architectures
                 (Multiprocessors) (C.1.2); Software --- Operating
                 Systems --- Storage Management (D.4.2); Software ---
                 Operating Systems --- Security and Protection (D.4.6);
                 Information Systems --- Database Management (H.2);
                 Information Systems --- Database Management --- Systems
                 (H.2.4); Information Systems --- Database Management
                 --- Database Machines (H.2.6)",
}

@Article{Ito:1983:HFO,
  author =       "Tetsuro Ito and Makoto Kizawa",
  title =        "Hierarchical File Organization and its Application to
                 Similar-String Matching",
  journal =      j-TODS,
  volume =       "8",
  number =       "3",
  pages =        "410--433",
  month =        sep,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/spell.bib;
                 http://www.math.utah.edu/pub/tex/bib/string-matching.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 http://www.math.utah.edu/pub/tex/bib/unix.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-3/p410-ito/p410-ito.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-3/p410-ito/",
  abstract =     "The automatic correction of misspelled inputs is
                 discussed from a viewpoint of similar-string matching.
                 First a hierarchical file organization based on a
                 linear ordering of records is presented for retrieving
                 records highly similar to any input query. Then the
                 spelling problem is attacked by constructing a
                 hierarchical file for a set of strings in a dictionary
                 of English words. The spelling correction steps proceed
                 as follows: (1) find one of the best-match strings
                 which are most similar to a query, (2) expand the
                 search area for obtaining the good-match strings, and
                 (3) interrupt the file search as soon as the required
                 string is displayed. Computational experiments verify
                 the performance of the proposed methods for
                 similar-string matching under the UNIX time-sharing
                 system.",
  acknowledgement = ack-nhfb,
  annote =       "A spelling checker to provide possible correct
                 spellings for all possible words. Results are quite
                 sketchy",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "best match; data processing, algorithms;
                 experimentation; file organization; good match;
                 hierarchical clustering; linear ordering; measurement;
                 office automation; performance; similar-string;
                 similarity; spelling correction; text editor; theory;
                 verification",
  review =       "ACM CR 8408-0665",
  subject =      "I.2 Computing Methodologies, ARTIFICIAL INTELLIGENCE,
                 Natural Language Processing \\ I.5.4 Computing
                 Methodologies, PATTERN RECOGNITION, Applications, Text
                 processing \\ E.5 Data, FILES, Organization/structure
                 \\ H.3.2 Information Systems, INFORMATION STORAGE AND
                 RETRIEVAL, Information Storage, File organization \\
                 H.3.3 Information Systems, INFORMATION STORAGE AND
                 RETRIEVAL, Information Search and Retrieval, Search
                 process \\ H.3.3 Information Systems, INFORMATION
                 STORAGE AND RETRIEVAL, Information Search and
                 Retrieval, Selection process \\ H.4 Information
                 Systems, INFORMATION SYSTEMS APPLICATIONS, Office
                 Automation",
}

@Article{Kolodner:1983:IRS,
  author =       "Janet L. Kolodner",
  title =        "Indexing and Retrieval Strategies for Natural Language
                 Fact Retrieval",
  journal =      j-TODS,
  volume =       "8",
  number =       "3",
  pages =        "434--464",
  month =        sep,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/ai.misc.bib; Compendex database;
                 Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-3/p434-kolodner/p434-kolodner.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-3/p434-kolodner/",
  abstract =     "Researchers in artificial intelligence have recently
                 become interested in natural language fact retrieval;
                 currently, their research is at a point where it can
                 begin contributing to the field of Information
                 Retrieval. In this paper, strategies for a natural
                 language fact retrieval system are mapped out, and
                 approaches to many of the organization and retrieval
                 problems are presented. The CYRUS system, which keeps
                 track of important people and is queried in English, is
                 presented and used to illustrate those solutions.",
  acknowledgement = ack-nhfb,
  classification = "723; 901",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "artificial intelligence; conceptual memory; database
                 retrieval; fact retrieval; information science; natural
                 language processing; question answering",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Content Analysis and Indexing (H.3.1);
                 Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3);
                 Computing Methodologies --- Artificial Intelligence ---
                 Knowledge Representation Formalisms and Methods
                 (I.2.4)",
}

@Article{Bernstein:1983:MCC,
  author =       "Philip A. Bernstein and Nathan Goodman",
  title =        "Multiversion Concurrency Control --- Theory and
                 Algorithms",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "465--483",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "86m:68025",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/Discrete.event.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p465-bernstein/p465-bernstein.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p465-bernstein/",
  abstract =     "Concurrency control is the activity of synchronizing
                 operations issued by concurrently executing programs on
                 a shared database. The goal is to produce an execution
                 that has the same effect as a serial (noninterleaved)
                 one. In a multiversion database system, each write on a
                 data item produces a new copy (or {\em version\/}) of
                 that data item. This paper presents a theory for
                 analyzing the correctness of concurrency control
                 algorithms for multiversion database systems. We use
                 the theory to analyze some new algorithms and some
                 previously published ones.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- Algorithms; database systems;
                 transaction processing",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4)",
}

@Article{Lynch:1983:MAN,
  author =       "Nancy A. Lynch",
  title =        "Multilevel Atomicity --- {A} New Correctness Criterion
                 for Database Concurrency Control",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "484--502",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "86j:68022",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p484-lynch/p484-lynch.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p484-lynch/",
  abstract =     "{\em Multilevel atomicity}, a new correctness criterion
                 for database concurrency control, is defined. It
                 weakens the usual notion of serializability by
                 permitting controlled interleaving among transactions.
                 It appears to be especially suitable for applications
                 in which the set of transactions has a natural
                 hierarchical structure based on the hierarchical
                 structure of an organization. A characterization for
                 multilevel atomicity, in terms of the absence of cycles
                 in a dependency relation among transaction steps, is
                 given. Some remarks are made concerning
                 implementation.",
  acknowledgement = ack-nhfb,
  annote =       "A weaker level of concurrency control than transaction
                 serializability, a generalization of Garc{\'\i}a-Molina
                 [1981].",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "atomicity; breakpoint; database systems; transaction",
  subject =      "Software --- Programming Languages --- Language
                 Constructs and Features (D.3.3): {\bf Concurrent
                 programming structures}",
}

@Article{Hecht:1983:SMF,
  author =       "Matthew S. Hecht and John D. Gabbe",
  title =        "Shadowed Management of Free Disk Pages with a Linked
                 List",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "503--514",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p503-hecht/p503-hecht.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p503-hecht/",
  abstract =     "We describe and prove correct a programming technique
                 using a linked list of pages for managing the free disk
                 pages of a file system where shadowing is the recovery
                 technique. Our technique requires a window of only two
                 pages of main memory for accessing and maintaining the
                 free list, and avoids wholesale copying of free-list
                 pages during a {\em checkpoint\/} or {\em recover\/}
                 operation.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "checkpoint; computer operating systems; computer
                 programming; database systems; dynamic storage
                 allocation; file system; recovery; shadowing; storage
                 management",
  subject =      "Software --- Operating Systems --- Storage Management
                 (D.4.2): {\bf Allocation/deallocation strategies};
                 Software --- Operating Systems --- Reliability (D.4.5):
                 {\bf Checkpoint/restart}",
}

@Article{Malhotra:1983:EIA,
  author =       "A. Malhotra and H. M. Markowitz and D. P. Pazel",
  title =        "{EAS-E}: An Integrated Approach to Application
                 Development",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "515--542",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p515-malhotra/p515-malhotra.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p515-malhotra/",
  abstract =     "{\em EAS-E\/} (pronounced EASY) is an experimental
                 programming language integrated with a database
                 management system now running on VM/370 at the IBM
                 Thomas J. Watson Research Center. The EAS-E programming
                 language is built around the entity, attribute, and set
                 ({\em EAS\/}) view of application development. It
                 provides a means for translating operations on EAS
                 structures directly into executable code. EAS-E
                 commands have an English-like syntax, and thus EAS-E
                 programs are easy to read and understand. EAS-E
                 programs are also more compact than equivalent programs
                 in other database languages.\par

                 The EAS-E database management system allows many users
                 simultaneous access to the database. It supports
                 locking and deadlock detection and is capable of
                 efficiently supporting network databases of various
                 sizes including very large databases, consisting of
                 several millions of entities stored on multiple DASD
                 extents. Also available is a nonprocedural facility
                 that allows a user to browse and update the database
                 without writing programs.",
  acknowledgement = ack-nhfb,
  annote =       "ER model based tool",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; database systems, TODS
                 E/R model; entity relationship model",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Data description languages
                 (DDL)}; Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Data manipulation languages
                 (DML)}; Information Systems --- Database Management ---
                 Systems (H.2.4); Software --- Software Engineering ---
                 Programming Environments (D.2.6)",
}

@Article{Moran:1983:CDO,
  author =       "Shlomo Moran",
  title =        "On the Complexity of Designing Optimal Partial-Match
                 Retrieval Systems",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "543--551",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P20 (68P10)",
  MRnumber =     "86j:68024",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; Graphics/siggraph/83.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p543-moran/p543-moran.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p543-moran/",
  abstract =     "We consider the problem of designing an information
                 retrieval system on which partial match queries have to
                 be answered. Each record in the system consists of a
                 list of {\em attributes}, and a partial match query
                 specifies the values of some of the attributes. The
                 records are stored in {\em buckets\/} in a secondary
                 memory, and in order to answer a partial match query
                 all the buckets that may contain a record satisfying
                 the specifications of that query must be retrieved. The
                 bucket in which a given record is stored is found by a
                 multiple key hashing function, which maps each
                 attribute to a string of a fixed number of bits. The
                 address of that bucket is then represented by the
                 string obtained by concatenating the strings on which
                 the various attributes were mapped. A partial match
                 query may specify only part of the bits in the string
                 representing the address, and the larger the number of
                 bits specified, the smaller the number of buckets that
                 have to be retrieved in order to answer the query.
                 \par

                 The optimization problem considered in this paper is
                 that of deciding to how many bits each attribute should
                 be mapped by the hashing function above, so that the
                 expected number of buckets retrieved per query is
                 minimized. Efficient solutions for special cases of
                 this problem have been obtained in [1], [12], and [14].
                 It is shown that in general the problem is NP-hard, and
                 that if $P \neq$ NP, it is also not fully approximable. Two
                 heuristic algorithms for the problem are also given and
                 compared.",
  acknowledgement = ack-nhfb,
  annote =       "Optimal variable bit lengths of hash strings; it is
                 NP-hard.",
  classification = "723; 901; 922",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "approximation algorithms; computer programming ---
                 algorithms; file organization; hashing; information
                 science; NP-hard problems; optimization, TODS hashing,
                 searching; partial match retrieval; searching",
  oldlabel =     "geom-947",
  review =       "ACM CR 8411-0954",
  subject =      "Theory of Computation --- Analysis of Algorithms and
                 Problem Complexity --- Nonnumerical Algorithms and
                 Problems (F.2.2); Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3)",
}

@Article{Ramamohanarao:1983:PMR,
  author =       "K. Ramamohanarao and John W. Lloyd and James A. Thom",
  title =        "Partial-Match Retrieval using Hashing and
                 Descriptors",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "552--576",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P10",
  MRnumber =     "794 538",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; Graphics/siggraph/83.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p552-ramamohanarao/p552-ramamohanarao.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p552-ramamohanarao/",
  abstract =     "This paper studies a partial-match retrieval scheme
                 based on hash functions and descriptors. The emphasis
                 is placed on showing how the use of a descriptor file
                 can improve the performance of the scheme. Records in
                 the file are given addresses according to hash
                 functions for each field in the record. Furthermore,
                 each page of the file has associated with it a
                 descriptor, which is a fixed-length bit string,
                 determined by the records actually present in the page.
                 Before a page is accessed to see if it contains records
                 in the answer to a query, the descriptor for the page
                 is checked. This check may show that no relevant
                 records are on the page and, hence, that the page does
                 not have to be accessed. The method is shown to have a
                 very substantial performance advantage over pure
                 hashing schemes, when some fields in the records have
                 large key spaces. A mathematical model of the scheme,
                 plus an algorithm for optimizing performance, is
                 given.",
  acknowledgement = ack-nhfb,
  classification = "723; 901; 921",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- Algorithms; DATA PROCESSING
                 --- File Organization; descriptors; dynamic file;
                 hashing; information science; MATHEMATICAL MODELS;
                 OPTIMIZATION; optimization; partial-match retrieval",
  oldlabel =     "geom-948",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}",
}

@Article{Ceri:1983:CQE,
  author =       "S. Ceri and G. Pelagatti",
  title =        "Correctness of Query Execution Strategies in
                 Distributed Databases",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "577--607",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p577-ceri/p577-ceri.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p577-ceri/",
  abstract =     "A major requirement of a Distributed DataBase
                 Management System (DDBMS) is to enable users to write
                 queries as though the database were not distributed
                 (distribution transparency). The DDBMS transforms the
                 user's queries into execution strategies, that is,
                 sequences of operations on the various nodes of the
                 network and of transmissions between them. An execution
                 strategy on a distributed database is correct if it
                 returns the same result as if the query were applied to
                 a nondistributed database.\par

                 This paper analyzes the correctness problem for query
                 execution strategies. A formal model, called
                 Multirelational Algebra, is used as a unifying
                 framework for this purpose. The problem of proving the
                 correctness of execution strategies is reduced to the
                 problem of proving the equivalence of two expressions
                 of Multirelational Algebra. A set of theorems on
                 equivalence is given in order to facilitate this task.
                 \par

                 The proposed approach can be used also for the
                 generation of correct execution strategies, because it
                 defines the rules which allow the transformation of a
                 correct strategy into an equivalent one. This paper
                 does not deal with the problem of evaluating equivalent
                 strategies, and therefore is not in itself a proposal
                 for a query optimizer for distributed databases.
                 However, it constitutes a theoretical foundation for
                 the design of such optimizers.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "correctness of database access; database systems;
                 distributed database access; read-only transactions;
                 relational algebra",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Distributed Systems
                 (C.2.4): {\bf Distributed databases}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Query languages}; Information Systems --- Database
                 Management --- Systems (H.2.4); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}",
}

@Article{Biskup:1983:FCR,
  author =       "Joachim Biskup",
  title =        "A Foundation of {Codd}'s Relational Maybe Operators",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "608--636",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15 (03B70)",
  MRnumber =     "86j:68019",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/nonmono.bib; Compendex database;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p608-biskup/p608-biskup.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p608-biskup/",
  abstract =     "Database relations which possibly contain maybe-tuples
                 and null values of type ``value at present unknown''
                 are studied. Maybe-tuples and null values are formally
                 interpreted by our notion of {\em representation},
                 which uses classical notions of predicate logic,
                 elaborates Codd's proposal of maybe-tuples, and adopts
                 Reiter's concept of a closed world. Precise notions of
                 {\em information content\/} and {\em redundancy},
                 associated with our notion of representation, are
                 investigated. {\em Extensions of the relational
                 algebra\/} to relations with maybe-tuples and null
                 values are proposed. Our extensions are essentially
                 Codd's, with some modifications. It is proved that
                 these extensions have natural properties which are
                 formally stated as being {\em adequate\/} and {\em
                 restricted}.\par

                 By the treatment of difference and division, our formal
                 framework can be used even for operations that require
                 ``negative information.'' Finally, extensions of {\em
                 update operations\/} are discussed.",
  acknowledgement = ack-nhfb,
  annote =       "Join, Project, Select, Union, Difference is defined
                 for nulls which are not labeled, results include Maybe.
                 Division does not work. Update is discussed.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "closed world assumption; database systems; information
                 content; maybe-tuple; negative information; null value;
                 open world assumption; redundancy; relational algebra;
                 relational database; representation",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Computing Methodologies ---
                 Artificial Intelligence --- Knowledge Representation
                 Formalisms and Methods (I.2.4): {\bf Predicate logic}",
}

@Article{Ullman:1983:KCA,
  author =       "Jeffrey D. Ullman",
  title =        "On {Kent}'s {``Consequences of assuming a universal
                 relation''} ({Technical} correspondence)",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "637--643",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/database.bib; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See \cite{Kent:1981:CAU}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p637-ullman/p637-ullman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p637-ullman/",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "TODS technical correspondence",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0)",
}

@Article{Kent:1983:URR,
  author =       "William Kent",
  title =        "The universal relation revisited (technical
                 correspondence)",
  journal =      j-TODS,
  volume =       "8",
  number =       "4",
  pages =        "644--648",
  month =        dec,
  year =         "1983",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/database.bib; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1983-8-4/p644-kent/p644-kent.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1983-8-4/p644-kent/",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "TODS technical correspondence",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0)",
}

@Article{Kaplan:1984:DPN,
  author =       "S. Jerrold Kaplan",
  title =        "Designing a Portable Natural Language Database Query
                 System",
  journal =      j-TODS,
  volume =       "9",
  number =       "1",
  pages =        "1--19",
  month =        mar,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/ai.misc.bib; Compendex database;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-1/p1-kaplan/p1-kaplan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-1/p1-kaplan/",
  abstract =     "One barrier to the acceptance of natural language
                 database query systems is the substantial installation
                 effort required for each new database. Much of this
                 effort involves the encoding of semantic knowledge for
                 the domain of discourse, necessary to correctly
                 interpret and respond to natural language questions.
                 For such systems to be practical, techniques must be
                 developed to increase their portability to new domains.
                 \par

                 This paper discusses several issues involving the
                 portability of natural language interfaces to database
                 systems, and presents the approach taken in {\em
                 CO-OP\/} -- a natural language database query system
                 that provides cooperative responses to English
                 questions and operates with a typical CODASYL database
                 system. {\em CO-OP\/} derives its domain-specific
                 knowledge from a {\em lexicon\/} (the list of words
                 known to the system) and the information already
                 present in the structure and content of the underlying
                 database. Experience with the implementation suggests
                 that strategies that are not directly derivative of
                 cognitive or linguistic models may nonetheless play an
                 important role in the development of practical natural
                 language systems.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems",
}

@Article{Reiss:1984:PDS,
  author =       "Steven P. Reiss",
  title =        "Practical Data-Swapping: The First Steps",
  journal =      j-TODS,
  volume =       "9",
  number =       "1",
  pages =        "20--37",
  month =        mar,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-1/p20-reiss/p20-reiss.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-1/p20-reiss/",
  abstract =     "The problem of statistical database confidentiality in
                 releasing microdata is addressed through the use of
                 approximate data-swapping. Here, a portion of the
                 microdata is replaced with a database that has been
                 selected with approximately the same statistics. The
                 result guarantees the confidentiality of the original
                 data, while providing microdata with accurate
                 statistics. Methods for achieving such transformations
                 are considered and analyzed through simulation.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Security",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems, TODS statistical databases",
  subject =      "Data --- Coding and Information Theory (E.4): {\bf
                 Nonsecret encoding schemes**}; Information Systems ---
                 Models and Principles --- Systems and Information
                 Theory (H.1.1): {\bf Value of information}; Information
                 Systems --- Information Storage and Retrieval ---
                 Online Information Services (H.3.5): {\bf Data
                 sharing}",
}

@Article{Nievergelt:1984:GFA,
  author =       "J. Nievergelt and Hans Hinterberger and Kenneth C.
                 Sevcik",
  title =        "The Grid File: An Adaptable, Symmetric Multikey File
                 Structure",
  journal =      j-TODS,
  volume =       "9",
  number =       "1",
  pages =        "38--71",
  month =        mar,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-1/p38-nievergelt/p38-nievergelt.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-1/p38-nievergelt/",
  abstract =     "Traditional file structures that provide multikey
                 access to records, for example, inverted files, are
                 extensions of file structures originally designed for
                 single-key access. They manifest various deficiencies
                 in particular for multikey access to highly dynamic
                 files. We study the dynamic aspects of file structures
                 that treat all keys symmetrically, that is, file
                 structures which avoid the distinction between primary
                 and secondary keys. We start from a bitmap approach and
                 treat the problem of file design as one of data
                 compression of a large sparse matrix. This leads to the
                 notions of a {\em grid partition\/} of the search space
                 and of a {\em grid directory}, which are the keys to a
                 dynamic file structure called the {\em grid file}. This
                 file system adapts gracefully to its contents under
                 insertions and deletions, and thus achieves an upper
                 bound of two disk accesses for single record retrieval;
                 it also handles range queries and partially specified
                 queries efficiently. We discuss in detail the design
                 decisions that led to the grid file, present simulation
                 results of its behavior, and compare it to other
                 multikey access file structures.",
  acknowledgement = ack-nhfb,
  annote =       "Grid files use a vector of hash-keys, partition the
                 result into clusters, and store the clusters into
                 blocks. Two accesses are used for retrieval. Update may
                 be more costly. Access structures fit in core?",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing, TODS multidimensional hashing,
                 multidimensional hashing",
  review =       "ACM CR 8411-0931",
}

@Article{Buchanan:1984:DMS,
  author =       "Jack R. Buchanan and Richard D. Fennell and Hanan
                 Samet",
  title =        "A Database Management System for the {Federal
                 Courts}",
  journal =      j-TODS,
  volume =       "9",
  number =       "1",
  pages =        "72--88",
  month =        mar,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-1/p72-buchanan/p72-buchanan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-1/p72-buchanan/",
  abstract =     "A judicial systems laboratory has been established and
                 several large-scale information management systems
                 projects have been undertaken within the Federal
                 Judicial Center in Washington, D.C. The newness of the
                 court application area, together with the experimental
                 nature of the initial prototypes, required that the
                 system building tools be as flexible and efficient as
                 possible for effective software design and development.
                 The size of the databases, the expected transaction
                 volumes, and the long-term value of the court records
                 required a data manipulation system capable of
                 providing high performance and integrity. The resulting
                 design criteria, the programming capabilities
                 developed, and their use in system construction are
                 described herein. This database programming facility
                 has been especially designed as a technical management
                 tool for the database administrator, while providing
                 the applications programmer with a flexible database
                 software interface for high productivity.
                 \par

                 Specifically, a network-type database management system
                 using SAIL as the data manipulation host language is
                 described. Generic data manipulation verb formats using
                 SAIL's macro facilities and dynamic data structuring
                 facilities allowing in-core database representations
                 have been developed to achieve a level of flexibility
                 not usually attained in conventional database
                 systems.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems, TODS SAIL, network model",
}

@Article{Papadimitriou:1984:CCM,
  author =       "Christos H. Papadimitriou and Paris C. Kanellakis",
  title =        "On Concurrency Control by Multiple Versions",
  journal =      j-TODS,
  volume =       "9",
  number =       "1",
  pages =        "89--99",
  month =        mar,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-1/p89-papadimitriou/p89-papadimitriou.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-1/p89-papadimitriou/",
  abstract =     "We examine the problem of concurrency control when the
                 database management system supports multiple versions
                 of the data. We characterize the limit of the
                 parallelism achievable by the multiversion approach and
                 demonstrate the resulting space-parallelism
                 trade-off.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems",
}

@Article{Shultz:1984:RTA,
  author =       "Roger K. Shultz and Roy J. Zingg",
  title =        "Response Time Analysis of Multiprocessor Computers for
                 Database Support",
  journal =      j-TODS,
  volume =       "9",
  number =       "1",
  pages =        "100--132",
  month =        mar,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-1/p100-shultz/p100-shultz.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-1/p100-shultz/",
  abstract =     "Comparison of three multiprocessor computer
                 architectures for database support is made possible
                 through evaluation of response time expressions. These
                 expressions are derived by parameterizing algorithms
                 performed by each machine to execute a relational
                 algebra query. Parameters represent properties of the
                 database and components of the machines. Studies of
                 particular parameter values exhibit response times for
                 conventional machine technology, for low selectivity,
                 high duplicate occurrence, and parallel disk access,
                 increasing the number of processors, and improving
                 communication and processing technology.",
  acknowledgement = ack-nhfb,
  annote =       "analyzes DIRECT, HYPERTREE, and REPT, their own
                 proposal.",
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "COMPUTER SYSTEMS, DIGITAL --- multiprocessing, TODS
                 relational model, relational queries, direct,
                 hypertree, rept; database systems",
}

@Article{Valduriez:1984:JSA,
  author =       "Patrick Valduriez and Georges Gardarin",
  title =        "Join and Semijoin Algorithms for a Multiprocessor
                 Database Machine",
  journal =      j-TODS,
  volume =       "9",
  number =       "1",
  pages =        "133--161",
  month =        mar,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-1/p133-valduriez/p133-valduriez.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-1/p133-valduriez/",
  abstract =     "This paper presents and analyzes algorithms for
                 computing joins and semijoins of relations in a
                 multiprocessor database machine. First, a model of the
                 multiprocessor architecture is described, incorporating
                 parameters defining I/O, CPU, and message transmission
                 times that permit calculation of the execution times of
                 these algorithms. Then, three join algorithms are
                 presented and compared. It is shown that, for a given
                 configuration, each algorithm has an application domain
                 defined by the characteristics of the operand and
                 result relations. Since a semijoin operator is useful
                 for decreasing I/O and transmission times in a
                 multiprocessor system, we present and compare two
                 equi-semijoin algorithms and one non-equi-semijoin
                 algorithm. The execution times of these algorithms are
                 generally linearly proportional to the size of the
                 operand and result relations, and inversely
                 proportional to the number of processors. We then
                 compare a method which consists of joining two
                 relations to a method whereby one joins their
                 semijoins. Finally, it is shown that the latter method,
                 using semijoins, is generally better. The various
                 algorithms presented are implemented in the SABRE
                 database system; an evaluation model selects the best
                 algorithm for performing a join according to the
                 results presented here. A first version of the SABRE
                 system is currently operational at INRIA.",
  acknowledgement = ack-nhfb,
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "COMPUTER SYSTEMS, DIGITAL --- Multiprocessing;
                 database systems",
}

@Article{Christodoulakis:1984:ICA,
  author =       "S. Christodoulakis",
  title =        "Implications of Certain Assumptions in Database
                 Performance Evaluation",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "163--186",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "86k:68011",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-2/p163-christodoulakis/p163-christodoulakis.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-2/p163-christodoulakis/",
  abstract =     "The assumptions of uniformity and independence of
                 attribute values in a file, uniformity of queries,
                 constant number of records per block, and random
                 placement of qualifying records among the blocks of a
                 file are frequently used in database performance
                 evaluation studies. In this paper we show that these
                 assumptions often result in predicting only an upper
                 bound of the expected system cost. We then discuss the
                 implications of nonrandom placement, nonuniformity, and
                 dependencies of attribute values on database design and
                 database performance evaluation.",
  acknowledgement = ack-nhfb,
  annote =       "After a somewhat cursory reading of the paper --- A
                 few comments: A. Uniform distributions, particularly
                 for parallel machines, may imply uniform distribution
                 of work over the machines --- thus giving an upper
                 bound for speedup --- thus a uniform distribution is an
                 optimistic assumption. B. For uniprocessor systems ---
                 the assumption of a uniform dist. is optimistic when:
                 1. hashing --- fewer collisions, shorter lists at
                 collisions should be expected from a uniform
                 distribution. 2. trees --- more balancing costs may be
                 incurred for non-uniform distributions. 3. searching
                 --- for example, a binary search on a non-uniform could
                 cost significantly more. 4. sorting --- I suspect that
                 uniform distributions are optimal for some sorting
                 methods, although I haven't looked at this in any
                 detail.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems, selectivity cost estimation approx
                 TODS",
}

@Article{Effelsberg:1984:LIP,
  author =       "Wolfgang Effelsberg and Mary E. S. Loomis",
  title =        "Logical, Internal, and Physical Reference Behavior in
                 {CODASYL} Database Systems",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "187--213",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-2/p187-effelsberg/p187-effelsberg.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-2/p187-effelsberg/",
  abstract =     "This work investigates one aspect of the performance
                 of CODASYL database systems: the data reference
                 behavior. We introduce a model of database traversals
                 at three levels: the logical, internal, and physical
                 levels. The mapping between the logical and internal
                 levels is defined by the internal schema, whereas the
                 mapping between the internal and the physical levels
                 depends on cluster properties of the database. Our
                 model explains the physical reference behavior for a
                 given sequence of DML statements at the logical level.
                 \par

                 Software has been implemented to monitor references in
                 two selected CODASYL DBMS applications. In a series of
                 experiments the physical reference behavior was
                 observed for varying internal schemas and cluster
                 properties of the database. The measurements were
                 limited to retrieval transactions, so that a variety of
                 queries could be analyzed for the same well-known state
                 of the database. Also, all databases were relatively
                 small in order to allow fast reloading with varying
                 internal schema parameters. In all cases, the database
                 transactions showed less locality of reference than do
                 programs under virtual memory operating systems; some
                 databases showed no locality at all. No evidence of
                 physical sequentiality was found. This suggests that
                 standard page replacement strategies are not optimal
                 for CODASYL database buffer management; instead,
                 replacement decisions in a database buffer should be
                 based on specific knowledge available from higher
                 system layers.",
  acknowledgement = ack-nhfb,
  acmcrnumber =  "8506 0534",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems, TODS buffer management",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Computer Systems Organization ---
                 Performance of Systems (C.4): {\bf Measurement
                 techniques}; Software --- Operating Systems --- Storage
                 Management (D.4.2): {\bf Storage hierarchies}; Software
                 --- Operating Systems --- Performance (D.4.8): {\bf
                 Measurements}; Software --- Operating Systems ---
                 Performance (D.4.8): {\bf Modeling and prediction};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2); Information Systems ---
                 Database Management --- Logical Design (H.2.1)",
}

@Article{Kim:1984:PPR,
  author =       "Won Kim and Daniel Gajski and David J. Kuck",
  title =        "A Parallel Pipelined Relational Query Processor",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "214--242",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-2/p214-kim/p214-kim.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-2/p214-kim/",
  abstract =     "This paper presents the design of a relational query
                 processor. The query processor consists of only four
                 processing PIPEs and a number of random-access memory
                 modules. Each PIPE processes tuples of relations in a
                 bit-serial, tuple-parallel manner for each of the
                 primitive database operations which comprise a complex
                 relational query. The design of the query processor
                 meets three major objectives: the query processor must
                 be manufacturable using existing and near-term LSI
                 (VLSI) technology; it must support in a uniform manner
                 both the numeric and nonnumeric processing requirements
                 a high-level user interface like SQL presents; and it
                 must support the query-processing strategy derived in
                 the query optimizer to satisfy certain system-wide
                 performance optimality criteria.",
  acknowledgement = ack-nhfb,
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer architecture; database systems; pipeline
                 processing; relational query processor, hardware
                 support database machine TODS",
  subject =      "Computer Systems Organization --- Processor
                 Architectures --- Other Architecture Styles (C.1.3):
                 {\bf High-level language architectures**}; Information
                 Systems --- Database Management --- Database Machines
                 (H.2.6)",
}

@Article{Al-Suwaiyel:1984:ATC,
  author =       "M. Al-Suwaiyel and E. Horowitz",
  title =        "Algorithms for Trie Compaction",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "243--263",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P05",
  MRnumber =     "794 541",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/citations/journals/tods/1984-9-2/p243-ai-suwaiyel/",
  abstract =     "The trie data structure has many properties which make
                 it especially attractive for representing large files
                 of data. These properties include fast retrieval time,
                 quick unsuccessful search determination, and finding
                 the longest match to a given identifier. The main
                 drawback is the space requirement. In this paper the
                 concept of trie compaction is formalized. An exact
                 algorithm for optimal trie compaction and three
                 algorithms for approximate trie compaction are given,
                 and an analysis of the three algorithms is done. The
                 analyses indicate that for actual tries, reductions of
                 around 70 percent in the space required by the
                 uncompacted trie can be expected. The quality of the
                 compaction is shown to be insensitive to the number of
                 nodes, while a more relevant parameter is the alphabet
                 size of the key.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing",
  subject =      "Data --- Data Structures (E.1); Data --- Data Storage
                 Representations (E.2); Data --- Coding and Information
                 Theory (E.4): {\bf Data compaction and compression};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2); Theory of
                 Computation --- Analysis of Algorithms and Problem
                 Complexity --- Nonnumerical Algorithms and Problems
                 (F.2.2)",
}

@Article{Mendelzon:1984:DST,
  author =       "Alberto O. Mendelzon",
  title =        "Database States and Their Tableaux",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "264--282",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "794 542",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-2/p264-mendelzon/p264-mendelzon.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-2/p264-mendelzon/",
  abstract =     "Recent work considers a database state to satisfy a
                 set of dependencies if there exists a satisfying
                 universal relation whose projections contain each of
                 the relations in the state. Such relations are called
                 {\em weak instances\/} for the state. We propose the
                 set of all weak instances for a state as an embodiment
                 of the information represented by the state. We
                 characterize states that have the same set of weak
                 instances by the equivalence of their associated
                 tableaux. We apply this notion to the comparison of
                 database schemes and characterize all pairs of schemes
                 such that for every legal state of one of them there
                 exists an equivalent legal state of the other one. We
                 use this approach to provide a new characterization of
                 Boyce-Codd Normal Form relation schemes.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "TODS weak instance assumption, database systems",
}

@Article{Maier:1984:FUR,
  author =       "David Maier and Jeffrey D. Ullman and Moshe Y. Vardi",
  title =        "On the Foundations of the Universal Relation Model",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "283--308",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "86m:68031",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-2/p283-maier/p283-maier.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-2/p283-maier/",
  abstract =     "Two fundamentally different approaches to the
                 universal relation model have been taken. According to
                 the first approach, the user's view of the database is
                 a universal relation or many universal relations, about
                 which the user poses queries. The second approach sees
                 the model as having query-processing capabilities that
                 relieve the user of the need to specify the logical
                 access path. Thus, while the first approach gives a
                 denotational semantics to query answering, the second
                 approach gives it an operational semantics. The authors
                 investigate the relationship between these two
                 approaches.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Languages; Theory; Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Query languages}; Computing Methodologies ---
                 Artificial Intelligence --- Deduction and Theorem
                 Proving (I.2.3); Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Normal
                 forms}; Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}",
}

@Article{Katsuno:1984:ECF,
  author =       "Hirofumi Katsuno",
  title =        "An Extension of Conflict-free Multi-valued Dependency
                 Sets",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "309--326",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "86m:68029",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-2/p309-katsuno/p309-katsuno.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-2/p309-katsuno/",
  abstract =     "Several researchers (Beeri, Bernstein, Chiu, Fagin,
                 Goodman, Maier, Mendelzon, Ullman, and Yannakakis) have
                 introduced a special class of database schemes, called
                 {\em acyclic\/} or {\em tree\/} schemes. Beeri et al.
                 have shown that an acyclic join dependency, naturally
                 defined by an acyclic database scheme, has several
                 desirable properties, and that an acyclic join
                 dependency is equivalent to a conflict-free set of
                 multivalued dependencies. However, since their results
                 are confined to multivalued and join dependencies, it
                 is not clear whether we can handle functional
                 dependencies independently of other dependencies.
                 \par

                 In the present paper we define an extension of a
                 conflict-free set, called an {\em extended
                 conflict-free set}, including multivalued dependencies
                 and functional dependencies, and show the following two
                 properties of an extended conflict-free set:\par

                 There are three equivalent definitions of an extended
                 conflict-free set. One of them is defined as a set
                 including an acyclic join dependency and a set of
                 functional dependencies such that the left and right
                 sides of each functional dependency are included in one
                 of the attribute sets that construct the acyclic join
                 dependency.\par

                 For a relation scheme with an extended conflict-free
                 set, there is a decomposition into third normal form
                 with a lossless join and preservation of
                 dependencies.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Theory; Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Schema and subschema};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Normal forms}",
}

@Article{Korth:1984:SUD,
  author =       "Henry F. Korth and Gabriel M. Kuper and Joan
                 Feigenbaum and Allen {Van Gelder} and Jeffrey D.
                 Ullman",
  title =        "{System/U}: a Database System Based on the Universal
                 Relation Assumption",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "331--347",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-3/p331-korth/p331-korth.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p331-korth/",
  abstract =     "System/U is a universal relation database system under
                 development at Stanford University which uses the
                 language C on UNIX. The system is intended to test the
                 use of the universal view, in which the entire database
                 is seen as one relation. This paper describes the
                 theory behind System/U, in particular the theory of
                 maximal objects and the connection between a set of
                 attributes. We also describe the implementation of the
                 DDL (Data Description Language) and the DML (Data
                 Manipulation Language), and discuss in detail how the
                 DDL finds maximal objects and how the DML determines
                 the connection between the attributes that appear in a
                 query.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; database systems;
                 RELATIONAL DATABASE; SYSTEM/U; UNIVERSAL RELATION",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Data description languages (DDL)}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Data manipulation languages (DML)}; Information
                 Systems --- Database Management --- Physical Design
                 (H.2.2): {\bf Access methods}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}; Mathematics of Computing --- Discrete
                 Mathematics --- Graph Theory (G.2.2): {\bf Path and
                 circuit problems}",
}

@Article{Wald:1984:RQI,
  author =       "Joseph A. Wald and Paul G. Sorenson",
  title =        "Resolving the Query Inference Problem Using {Steiner}
                 Trees",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "348--368",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; Graphics/siggraph/86.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-3/p348-wald/p348-wald.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p348-wald/",
  abstract =     "The query inference problem is to translate a sentence
                 of a query language into an unambiguous representation
                 of a query. A query is represented as an expression
                 over a set of query trees. A metric is introduced for
                 measuring the complexity of a query and also a proposal
                 that a sentence be translated into the least complex
                 query which `satisfies' the sentence. This method of
                 query inference can be used to resolve ambiguous
                 sentences and leads to easier formulation of
                 sentences.",
  acknowledgement = ack-nhfb,
  annote =       "MDCST resolves queries over attributes using a schema
                 tree.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Languages; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems; query inference problem; Steiner
                 trees, TODS E/R model",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Transaction processing}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Query formulation};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Graph algorithms};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Trees}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Query languages}",
}

@Article{Ramamohanarao:1984:RLH,
  author =       "K. Ramamohanarao and R. Sacks-Davis",
  title =        "Recursive Linear Hashing",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "369--391",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P10",
  MRnumber =     "794 545",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-3/p369-ramamohanarao/p369-ramamohanarao.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p369-ramamohanarao/",
  abstract =     "A modification of linear hashing is proposed for which
                 the conventional use of overflow records is avoided.
                 Furthermore, an implementation of linear hashing is
                 presented for which the amount of physical storage
                 claimed is only fractionally more than the minimum
                 required. This implementation uses a fixed amount of
                 in-core space. Simulation results are given which
                 indicate that even for storage utilizations approaching
                 95 percent, the average successful search cost for this
                 method is close to one disk access.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "COMPUTER SIMULATION; data processing; DYNAMIC FILES;
                 LINEAR HASHING",
  subject =      "Data --- Data Storage Representations (E.2): {\bf
                 Hash-table representations}; Software --- Operating
                 Systems --- File Systems Management (D.4.3): {\bf File
                 organization}; Information Systems --- Information
                 Storage and Retrieval --- Information Storage (H.3.2):
                 {\bf File organization}; Computing Methodologies ---
                 Simulation and Modeling --- Applications (I.6.3);
                 Software --- Operating Systems --- Storage Management
                 (D.4.2): {\bf Secondary storage}",
}

@Article{Cooper:1984:ATU,
  author =       "Robert B. Cooper and Martin K. Solomon",
  title =        "The Average Time Until Bucket Overflow",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "392--408",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-3/p392-cooper/p392-cooper.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p392-cooper/",
  abstract =     "It is common for file structures to be divided into
                 equal-length partitions, called buckets, into which
                 records arrive for insertion and from which records are
                 physically deleted. We give a simple algorithm which
                 permits calculation of the average time until overflow
                 for a bucket of capacity $n$ records, assuming that
                 record insertions and deletions can be modeled as a
                 stochastic process in the usual manner of queueing
                 theory. We present some numerical examples, from which
                 we make some general observations about the
                 relationships among insertion and deletion rates,
                 bucket capacity, initial fill, and average time until
                 overflow. In particular, we observe that it makes sense
                 to define the {\em stable point\/} as the product of
                 the arrival rate and the average residence time of the
                 records; then a bucket tends to fill up to its stable
                 point quickly, in an amount of time almost independent
                 of the stable point, but the average time until
                 overflow increases rapidly with the difference between
                 the bucket capacity and the stable point.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "BUCKET OVERFLOW; data processing",
}

@Article{March:1984:SER,
  author =       "Salvatore T. March and Gary D. Scudder",
  title =        "On the Selection of Efficient Record Segmentations and
                 Backup Strategies for Large Shared Databases",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "409--438",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-3/p409-march/p409-march.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p409-march/",
  abstract =     "In recent years the information processing
                 requirements of business organizations have expanded
                 tremendously. With this expansion, the design of
                 databases to efficiently manage and protect business
                 information has become critical. We analyze the impacts
                 of {\em record segmentation\/} (the assignment of data
                 items to segments defining subfiles), an
                 efficiency-oriented design technique, and of {\em
                 backup and recovery strategies}, a data protection
                 technique, on the overall process of database design. A
                 combined record segmentation/backup and recovery
                 procedure is presented and an application of the
                 procedure is discussed. Results in which problem
                 characteristics are varied along three dimensions:
                 update frequencies, available types of access paths,
                 and the predominant type of data retrieval that must be
                 supported by the database, are presented.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "backup strategies; database systems; large shared
                 databases; record segmentations",
  subject =      "Data --- Files (E.5): {\bf Organization/structure};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Software --- Operating Systems ---
                 Reliability (D.4.5): {\bf Backup procedures}; Software
                 --- Operating Systems --- File Systems Management
                 (D.4.3): {\bf File organization}; Data --- Files (E.5):
                 {\bf Backup/recovery}; Information Systems --- Database
                 Management --- Database Administration (H.2.7): {\bf
                 Logging and recovery}",
}

@Article{Manber:1984:CCD,
  author =       "Udi Manber and Richard E. Ladner",
  title =        "Concurrency Control in a Dynamic Search Structure",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "439--455",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68N25 (68P15)",
  MRnumber =     "794 546",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: ACM Proc. on Database Systems,
                 Boston, Apr. 1982, pp. 268--282.",
  URL =          "http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p439-manbar/",
  abstract =     "A design of a data structure and efficient algorithms
                 for concurrent manipulations of a dynamic search
                 structure by independent user processes is presented in
                 this paper. The algorithms include updating data,
                 inserting new elements, and deleting elements. The
                 algorithms support a high level of concurrency. Each of
                 the operations listed above requires only constant
                 amount of locking. In order to make the system even
                 more efficient for the user processes, maintenance
                 processes are introduced. The maintenance processes
                 operate independently in the background to reorganize
                 the data structure and ``clean up'' after the (more
                 urgent) user processes. A proof of correctness of the
                 algorithms is given and some experimental results and
                 extensions are examined.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- Algorithms; concurrency
                 control; data processing; dynamic search structure",
}

@Article{Davidson:1984:OCP,
  author =       "Susan B. Davidson",
  title =        "Optimism and Consistency in Partitioned Distributed
                 Database Systems",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "456--481",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "794 547",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-3/p456-davidson/p456-davidson.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p456-davidson/",
  abstract =     "A protocol for transaction processing during partition
                 failures is presented which guarantees mutual
                 consistency between copies of data-items after repair
                 is completed. The protocol is ``optimistic'' in that
                 transactions are processed without restrictions during
                 failure; conflicts are then detected at repair time
                 using a {\em precedence graph}, and are resolved by
                 backing out transactions according to some {\em backout
                 strategy}. The resulting database state then
                 corresponds to a serial execution of some subset of
                 transactions run during the failure. Results from
                 simulation and probabilistic modeling show that the
                 optimistic protocol is a reasonable alternative in many
                 cases. Conditions under which the protocol performs
                 well are noted, and suggestions are made as to how
                 performance can be improved. In particular, a backout
                 strategy is presented which takes into account
                 individual transaction costs and attempts to minimize
                 total backout cost. Although the problem of choosing
                 transactions to minimize total backout cost is, in
                 general, NP-complete, the backout strategy is efficient
                 and produces very good results.",
  acknowledgement = ack-nhfb,
  annote =       "Counterexample to uniform is last hypothesis of
                 Christodoulakis.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance; Reliability",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems; DISTRIBUTED database systems",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}; Information
                 Systems --- Database Management --- Physical Design
                 (H.2.2): {\bf Recovery and restart}; Mathematics of
                 Computing --- Discrete Mathematics --- Graph Theory
                 (G.2.2)",
}

@Article{Ibaraki:1984:ONO,
  author =       "Toshihide Ibaraki and Tiko Kameda",
  title =        "On the Optimal Nesting Order for Computing $ {N}
                 $-Relational Joins",
  journal =      j-TODS,
  volume =       "9",
  number =       "3",
  pages =        "482--502",
  month =        sep,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "794 548",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-3/p482-ibaraki/p482-ibaraki.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-3/p482-ibaraki/",
  abstract =     "Using the nested loops method, this paper addresses
                 the problem of minimizing the number of page fetches
                 necessary to evaluate a given query to a relational
                 database. We first propose a data structure whereby the
                 number of page fetches required for query evaluation is
                 substantially reduced and then derive a formula for the
                 expected number of page fetches. An optimal solution to
                 our problem is the nesting order of relations in the
                 evaluation program, which minimizes the number of page
                 fetches. Since the minimization of the formula is
                 NP-hard, as shown in the Appendix, we propose a
                 heuristic algorithm which produces a good suboptimal
                 solution in polynomial time. For the special case where
                 the input query is a ``tree query,'' we present an
                 efficient algorithm for finding an optimal nesting
                 order.",
  acknowledgement = ack-nhfb,
  acmcrnumber =  "8506 0535",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "N-relational joins; optimal nesting order; TODS query
                 optimization processing relational model, database
                 systems",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing}; Theory
                 of Computation --- Analysis of Algorithms and Problem
                 Complexity --- Nonnumerical Algorithms and Problems
                 (F.2.2); Information Systems --- Database Management
                 --- Logical Design (H.2.1): {\bf Data models};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}",
}

@Article{Elhardt:1984:DCH,
  author =       "Klaus Elhardt and Rudolf Bayer",
  title =        "A Database Cache for High Performance and Fast Restart
                 in Database Systems",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "503--525",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-4/p503-elhardt/p503-elhardt.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-4/p503-elhardt/",
  abstract =     "Performance in database systems is strongly influenced
                 by buffer management and transaction recovery methods.
                 This paper presents the principles of the database
                 cache, which replaces the traditional buffer. In
                 comparison to buffer management, cache management is
                 more carefully coordinated with transaction management,
                 and integrates transaction recovery. High throughput of
                 \par

                 small- and medium-sized transactions is achieved by
                 fast commit processing and low database traffic. Very
                 fast handling of transaction failures and short restart
                 time after system failure are guaranteed in such an
                 environment. Very long retrieval and update
                 transactions are also supported.",
  acknowledgement = ack-nhfb,
  affiliation =  "Technische Univ, Muenchen, Inst fuer Informatik,
                 Munich, West Ger",
  affiliationaddress = "Technische Univ, Muenchen, Inst fuer Informatik,
                 Munich, West Ger",
  annote =       "The Elhardt-Bayer cache does indeed resemble Alpine in
                 many important respects. The primary difference is that
                 it requires all of a transaction's updates to be
                 written to the log in contiguous log pages, which
                 allows some compact encodings to be used in
                 representing the log, but also means that more work
                 must be done synchronously at commit time. Also, their
                 scheme is not designed to support two-phase commit, and
                 extending it to handle two-phase commit is sure to mess
                 up the pretty log encoding, I think. ---Mark Brown.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "buffer management; crash recovery; data processing;
                 database cache; database systems; fast restart; media
                 failure",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}; Software ---
                 Operating Systems --- Storage Management (D.4.2): {\bf
                 Main memory}",
}

@Article{Reuter:1984:PAR,
  author =       "Andreas Reuter",
  title =        "Performance Analysis of Recovery Techniques",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "526--559",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-4/p526-reuter/p526-reuter.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-4/p526-reuter/",
  abstract =     "Various logging and recovery techniques for
                 centralized transaction-oriented database systems under
                 performance aspects are described and discussed. The
                 classification of functional principles that has been
                 developed in a companion paper is used as a
                 terminological basis. In the main sections, a set of
                 analytic models is introduced and evaluated in order to
                 compare the performance characteristics of nine
                 different recovery techniques with respect to four key
                 parameters and a set of other parameters with less
                 influence. Finally, the results of model evaluation as
                 well as the limitations of the models themselves are
                 discussed.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Kaiserslautern, Kaiserslautern, West Ger",
  affiliationaddress = "Univ of Kaiserslautern, Kaiserslautern, West
                 Ger",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance; Reliability",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- algorithms; data processing;
                 database systems; logging and recovery; recovery and
                 restart; recovery techniques; Reliability; transaction
                 processing",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart};
                 Software --- Operating Systems --- Reliability (D.4.5):
                 {\bf Fault-tolerance}; Software --- Operating Systems
                 --- Performance (D.4.8): {\bf Modeling and prediction};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}; Information
                 Systems --- Database Management --- Database
                 Administration (H.2.7): {\bf Logging and recovery}",
}

@Article{Effelsberg:1984:PDB,
  author =       "Wolfgang Effelsberg and Theo Haerder",
  title =        "Principles of Database Buffer Management",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "560--595",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-4/p560-effelsberg/p560-effelsberg.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-4/p560-effelsberg/",
  abstract =     "This paper discusses the implementation of a database
                 buffer manager as a component of a DBMS. The interface
                 between calling components of higher system layers and
                 the buffer manager is described; the principal
                 differences between virtual memory paging and database
                 buffer management are outlined; the notion of
                 referencing versus addressing of database pages is
                 introduced; and the concept of fixing pages in the
                 buffer to prevent uncontrolled replacement is
                 explained.\par

                 Three basic tasks have to be performed by the buffer
                 manager: buffer search, allocation of frames to
                 concurrent transactions, and page replacement. For each
                 of these tasks, implementation alternatives are
                 discussed and illustrated by examples from a
                 performance evaluation project of a CODASYL DBMS.",
  acknowledgement = ack-nhfb,
  affiliation =  "IBM, Scientific Cent, Heidelberg, West Ger",
  affiliationaddress = "IBM, Scientific Cent, Heidelberg, West Ger",
  annote =       "an interface between the buffer manager and the DBMS,
                 choices of page replacement policies; does not cover
                 sequential I/O (read-ahead and write-behind).",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "buffer management; computer programming ---
                 algorithms; data processing; database systems; memory
                 paging; referencing database pages; replacement
                 algorithms",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2); Software --- Operating Systems
                 --- Storage Management (D.4.2): {\bf Storage
                 hierarchies}",
}

@Article{Bernstein:1984:ACC,
  author =       "Philip A. Bernstein and Nathan Goodman",
  title =        "An Algorithm for Concurrency Control and Recovery in
                 Replicated Distributed Databases",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "596--615",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "86k:68010",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib;
                 Distributed/fault.tolerant.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-4/p596-bernstein/p596-bernstein.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-4/p596-bernstein/",
  abstract =     "In a one-copy distributed database, each data item is
                 stored at exactly one site. In a replicated database,
                 some data items may be stored at multiple sites. The
                 main motivation is improved reliability: by storing
                 important data at multiple sites, the DBS can operate
                 even though some sites have failed.\par

                 This paper describes an algorithm for handling
                 replicated data, which allows users to operate on data
                 so long as one copy is ``available.'' A copy is
                 ``available'' when (i) its site is up, and (ii) the
                 copy is not out-of-date because of an earlier crash.
                 \par

                 The algorithm handles clean, detectable site failures,
                 but not Byzantine failures or network partitions.",
  acknowledgement = ack-nhfb,
  affiliation =  "Sequoia Systems Inc, Marlborough, MA, USA",
  affiliationaddress = "Sequoia Systems Inc, Marlborough, MA, USA",
  annote =       "3-phase commit. The first and third phases are
                 identical to the two phases of 2-phase commit. There is
                 a `Precommit' phase after the first phase where the
                 knowledge of the coordinator is replicated elsewhere,
                 thus protecting against a crash of the coordinator
                 (which could result in locks being tied up for long
                 periods).",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Reliability",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "Algorithms; computer programming; concurrency control
                 and recovery; continuous operation; database systems;
                 replicated distributed databases; serializability;
                 transaction processing",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Distributed databases}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Transaction processing}",
}

@Article{Chen:1984:ANV,
  author =       "Wen Chin Chen and Jeffrey Scott Vitter",
  title =        "Analysis of New Variants of Coalesced Hashing",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "616--645",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P10",
  MRnumber =     "794 550",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-4/p616-chen/p616-chen.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-4/p616-chen/",
  abstract =     "The coalesced hashing method has been shown to be very
                 fast for dynamic information storage and retrieval.
                 This paper analyzes in a uniform way the performance of
                 coalesced hashing and its variants, thus settling some
                 open questions in the literature.\par

                 In all the variants, the range of the hash function is
                 called the {\em address region}, and extra space
                 reserved for storing colliders is called the {\em
                 cellar}. We refer to the unmodified method, which was
                 analyzed previously, as {\em late-insertion\/}
                 coalesced hashing. In this paper we analyze late
                 insertion and two new variations called {\em early
                 insertion\/} and {\em varied insertion}. When there is
                 no cellar, the early-insertion method is better than
                 late insertion; however, past experience has indicated
                 that it might be worse when there is a cellar. Our
                 analysis confirms that it is worse. The
                 varied-insertion method was introduced as a means of
                 combining the advantages of late insertion and early
                 insertion. This paper shows that varied insertion
                 requires fewer probes per search, on the average, than
                 do the other variants.\par

                 Each of these three coalesced hashing methods has a
                 parameter that relates the sizes of the address region
                 and the cellar. Techniques in this paper are designed
                 for tuning the parameter in order to achieve optimum
                 search times. We conclude with a list of open
                 problems.",
  acknowledgement = ack-nhfb,
  affiliation =  "Brown Univ, Dep of Computer Science, Providence, RI,
                 USA",
  affiliationaddress = "Brown Univ, Dep of Computer Science, Providence,
                 RI, USA",
  annote =       "Chaining and open addressing. Internal memory is
                 assumed!",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "address region; cellar; coalesced hashing; computer
                 programming --- algorithms; data processing; early
                 insertion; information retrieval systems; late
                 insertion",
  subject =      "Data --- Data Storage Representations (E.2): {\bf
                 Hash-table representations}; Software --- Software
                 Engineering --- Metrics (D.2.8): {\bf Performance
                 measures}; Theory of Computation --- Analysis of
                 Algorithms and Problem Complexity --- Nonnumerical
                 Algorithms and Problems (F.2.2): {\bf Sorting and
                 searching}; Mathematics of Computing --- Discrete
                 Mathematics --- Combinatorics (G.2.1): {\bf Generating
                 functions}; Mathematics of Computing --- Discrete
                 Mathematics --- Combinatorics (G.2.1): {\bf
                 Permutations and combinations}; Mathematics of
                 Computing --- Discrete Mathematics --- Combinatorics
                 (G.2.1): {\bf Recurrences and difference equations};
                 Mathematics of Computing --- Probability and Statistics
                 (G.3): {\bf Random number generation}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Search
                 process}",
}

@Article{Deogun:1984:OCF,
  author =       "J. S. Deogun and V. V. Raghavan and T. K. W. Tsou",
  title =        "Organization of Clustered Files for Consecutive
                 Retrieval",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "646--671",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-4/p646-deogun/p646-deogun.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-4/p646-deogun/",
  abstract =     "This paper studies the problem of storing single-level
                 and multilevel clustered files. Necessary and
                 sufficient conditions for a single-level clustered file
                 to have the consecutive retrieval property (CRP) are
                 developed. A linear time algorithm to test the CRP for
                 a given clustered file and to identify the proper
                 arrangement of objects, if CRP exists, is presented.
                 For the single-level clustered files that do not have
                 CRP, it is shown that the problem of identifying a
                 storage organization with minimum redundancy is
                 NP-complete.\par

                 Consequently, an efficient heuristic algorithm to
                 generate a good storage organization for such files is
                 developed. Furthermore, it is shown that, for certain
                 types of multilevel clustered files, there exists a
                 storage organization such that the objects in each
                 cluster, for all clusters in each level of the
                 clustering, appear in consecutive locations.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Nebraska, Lincoln, NE, USA",
  affiliationaddress = "Univ of Nebraska, Lincoln, NE, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "CLUSTERED FILES; computer programming --- Algorithms;
                 CONSECUTIVE RETRIEVAL; data processing --- File
                 Organization; FILE ORGANIZATION; information retrieval
                 systems; NP-COMPLETE",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Clustering}; Information Systems --- Information
                 Storage and Retrieval --- Information Storage (H.3.2):
                 {\bf File organization}",
}

@Article{Traub:1984:SSS,
  author =       "J. F. Traub and Y. Yemini and H. Wozniakowski",
  title =        "The Statistical Security of a Statistical Database",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "672--679",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sun Dec 8 08:54:10 MST 1996",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  abstract =     "This study proposes a statistical perturbation scheme
                 to protect a statistical database against compromise.
                 The proposed scheme can handle the security of
                 numerical as well as nonnumerical sensitive fields or a
                 combination of fields. Furthermore, knowledge of some
                 records in a database does not help to compromise
                 unknown records. The authors use Chebyshev's inequality
                 to analyze the trade-offs among the magnitude of the
                 perturbations, the error incurred by statistical
                 queries, and the size of the query set to which they
                 apply. They show that if the statistician is given
                 absolute error guarantees, then a compromise is
                 possible, but the cost is made exponential in the size
                 of the database.",
  acknowledgement = ack-nhfb,
  affiliation =  "Columbia Univ, Dep of Computer Science, New York, NY,
                 USA",
  affiliationaddress = "Columbia Univ, Dep of Computer Science, New
                 York, NY, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "Chebyshev's inequality; complexity of compromise; data
                 processing --- Security of data; database systems;
                 security; statistical database",
}

@Article{Navathe:1984:VPA,
  author =       "Shamkant Navathe and Stefano Ceri and Gio Wiederhold
                 and Jinglie Dou",
  title =        "Vertical Partitioning Algorithms for Database Design",
  journal =      j-TODS,
  volume =       "9",
  number =       "4",
  pages =        "680--710",
  month =        dec,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  note =         "Also published in/as: Stanford Un., TR-CS-82-957, Jan.
                 1983, revised Aug. 1983.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1984-9-4/p680-navathe/p680-navathe.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1984-9-4/p680-navathe/",
  abstract =     "This paper addresses the vertical partitioning of a
                 set of logical records or a relation into fragments.
                 The rationale behind vertical partitioning is to
                 produce fragments, groups of attribute columns, that
                 ``closely match'' the requirements of transactions.
                 \par

                 Vertical partitioning is applied in three contexts: a
                 database stored on devices of a single type, a database
                 stored in different memory levels, and a distributed
                 database. In a two-level memory hierarchy, most
                 transactions should be processed using the fragments in
                 primary memory. In distributed databases, fragment
                 allocation should maximize the amount of local
                 transaction processing.\par

                 Fragments may be nonoverlapping or overlapping. A
                 two-phase approach for the determination of fragments
                 is proposed; in the first phase, the design is driven
                 by empirical objective functions which do not require
                 specific cost information. The second phase performs
                 cost optimization by incorporating the knowledge of a
                 specific application environment. The algorithms
                 presented in this paper have been implemented, and
                 examples of their actual use are shown.",
  acknowledgement = ack-nhfb,
  affiliation =  "Stanford Univ, Dep of Computer Science, Stanford, CA,
                 USA",
  affiliationaddress = "Stanford Univ, Dep of Computer Science,
                 Stanford, CA, USA",
  annote =       "based on affinity considerations.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "Algorithms; CLUSTERS; computer programming; data
                 processing; database systems --- Design; FRAGMENT
                 ALLOCATION; MEMORY LEVELS; VERTICAL PARTITIONING
                 ALGORITHMS",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1); Information Systems ---
                 Database Management --- Physical Design (H.2.2);
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}",
}

@Article{Maier:1984:DFG,
  author =       "D. Maier",
  title =        "Databases in the Fifth Generation Project: Is {Prolog}
                 a Database Language?",
  journal =      j-TODS,
  volume =       "9",
  number =       "2",
  pages =        "??--??",
  month =        jun,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 12:48:52 1996",
  bibsource =    "Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: ACM SIGMOD, 1984.",
  annote =       "very readable discussion, includes links to universal
                 relation research.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  xxnote =       "This paper does not seem to be published in TODS.",
}

@Article{Skeen:1984:IAP,
  author =       "D. Skeen and D. D. Wright",
  title =        "Increasing Availability in Partitioned Database
                 Systems",
  journal =      j-TODS,
  volume =       "??",
  number =       "??",
  pages =        "290--299",
  month =        apr,
  year =         "1984",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 12:48:54 1996",
  bibsource =    "Distributed/Dist.Sys.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  xxnote =       "This paper does not seem to be published in TODS.",
}

@Article{Franaszek:1985:LCT,
  author =       "Peter Franaszek and John T. Robinson",
  title =        "Limitations of Concurrency in Transaction Processing",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "1--28",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/real.time.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-1/p1-franaszek/p1-franaszek.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-1/p1-franaszek/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3160.html",
  abstract =     "Given the pairwise probability of conflict p among
                 transactions in a transaction processing system,
                 together with the total number of concurrent
                 transactions n, the effective level of concurrency
                 E(n,p) is defined as the expected number of the n
                 transactions that can run concurrently and actually do
                 useful work. Using a random graph model of concurrency,
                 we show for three general classes of concurrency
                 control methods, examples of which are (1) standard
                 locking, (2) strict priority scheduling, and (3)
                 optimistic methods, that (1) $E(n, p) \approx n(1 -
                 p/2)^{n-1}$, (2) $E(n, p) \approx (1 - (1 -
                 p)^{n})/p$, and (3) $1 + ((1 - p)/p)\ln(p(n - 1) + 1)
                 \leq E(n, p) \leq 1 + (1/p)\ln(p(n - 1) + 1)$. Thus,
                 for fixed $p$, as $n \rightarrow \infty$, (1) $E
                 \rightarrow 0$ for standard locking methods, (2) $E
                 \rightarrow 1/p$ for strict priority scheduling
                 methods, and (3) $E \rightarrow \infty$ for optimistic
                 methods.
                 Also found are bounds on E in the case where conflicts
                 are analyzed so as to maximize E.\par

                 The predictions of the random graph model are confirmed
                 by simulations of an abstract transaction processing
                 system. In practice, though, there is a price to pay
                 for the increased effective level of concurrency of
                 methods (2) and (3): using these methods there is more
                 wasted work (i.e., more steps executed by transactions
                 that are later aborted). In response to this problem,
                 three new concurrency control methods suggested by the
                 random graph model analysis are developed. Two of
                 these, called (a) running priority and (b) older or
                 running priority, are shown by the simulation results
                 to perform better than the previously known methods
                 (1)-(3) for relatively large n or large p, in terms of
                 achieving a high effective level of concurrency at a
                 comparatively small cost in wasted work.",
  acknowledgement = ack-nhfb,
  affiliation =  "IBM, Thomas J. Watson Research Cent, Yorktown Heights,
                 NY, USA",
  affiliationaddress = "IBM, Thomas J. Watson Research Cent, Yorktown
                 Heights, NY, USA",
  annote =       "6 methods, incl. optimistic (best) but not
                 versioning.",
  classification = "722; 723; 921",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance; Theory; Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer systems, digital --- multiprocessing;
                 concurrency control; database systems; mathematical
                 techniques --- graph theory; performance; theory;
                 transaction processing, algorithms; verification",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Distributed systems. {\bf F.2.2}: Theory of
                 Computation, ANALYSIS OF ALGORITHMS AND PROBLEM
                 COMPLEXITY, Nonnumerical Algorithms and Problems. {\bf
                 D.1.3}: Software, PROGRAMMING TECHNIQUES, Concurrent
                 Programming. {\bf D.4.1}: Software, OPERATING SYSTEMS,
                 Process Management, Concurrency.",
}

@Article{Sacca:1985:DPC,
  author =       "Domenico Sacca and Gio Wiederhold",
  title =        "Database Partitioning in a Cluster of Processors",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "29--56",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Parallel/Multi.bib",
  note =         "Also published in
                 \cite[242--247]{Schkolnick:1983:ICV}, and IBM Research
                 Report No. RJ-4076, 1983.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-1/p29-sacca/p29-sacca.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-1/p29-sacca/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3161.html",
  abstract =     "In a distributed database system the partitioning and
                 allocation of the database over the processor nodes of
                 the network can be a critical aspect of the database
                 design effort. In this paper we develop and evaluate
                 algorithms that perform this task in a computationally
                 feasible manner. The network we consider is
                 characterized by a relatively high communication
                 bandwidth, considering the processing and input output
                 capacities in its processors. Such a balance is typical
                 if the processors are connected via busses or local
                 networks. The common constraint that transactions have
                 a specific root node no longer exists, so that there
                 are more distribution choices. However, a poor
                 distribution leads to less efficient computation,
                 higher costs, and higher loads in the nodes or in the
                 communication network so that the system may not be
                 able to handle the required set of transactions.
                 \par

                 Our approach is to first split the database into
                 fragments which constitute appropriate units for
                 allocation. The fragments to be allocated are selected
                 based on maximal benefit criteria using a greedy
                 heuristic. The assignment to processor nodes uses a
                 first-fit algorithm. The complete algorithm, called
                 GFF, is stated in a procedural form.\par

                 The complexity of the problem and of its candidate
                 solutions are analyzed and several interesting
                 relationships are proven. Alternate benefit metrics are
                 considered, since the execution cost of the allocation
                 procedure varies by orders of magnitude with the
                 alternatives of benefit evaluation. A mixed benefit
                 evaluation strategy is eventually proposed.\par

                 A model for evaluation is presented. Two of the
                 strategies are experimentally evaluated, and the
                 reported results support the discussion. The approach
                 should be suitable for other cases where resources have
                 to be allocated subject to resource constraints.",
  acknowledgement = ack-nhfb,
  affiliation =  "IBM, Research Lab, San Jose, CA, USA",
  affiliationaddress = "IBM, Research Lab, San Jose, CA, USA",
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer systems, digital --- multiprocessing;
                 database partitioning, parallelism declustering
                 partitioning disk striping TODS, algorithms; database
                 systems; design; theory",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Distributed systems. {\bf C.2.4}: Computer
                 Systems Organization, COMPUTER-COMMUNICATION NETWORKS,
                 Distributed Systems.",
}

@Article{Pramanik:1985:UGT,
  author =       "Sakti Pramanik and David Ittner",
  title =        "Use of Graph-Theoretic Models for Optimal Relational
                 Database Accesses to Perform Join",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "57--74",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "794 551",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-1/p57-pramanik/p57-pramanik.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-1/p57-pramanik/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3325.html",
  abstract =     "A graph model is presented to analyze the performance
                 of a relational join. The amount of page reaccesses,
                 the page access sequence, and the amount of buffer
                 needed are represented in terms of graph parameters. By
                 using the graph model formed from the index on the join
                 attributes, we determine the relationships between
                 these parameters. Two types of buffer allocation
                 strategies are studied, and the upper bound on the
                 buffer size with no page reaccess is given. This bound
                 is shown to be the maximum cut value of a graph. Hence,
                 the problem of computing this upper bound is NP-hard.
                 We also give algorithms to determine a page access
                 sequence requiring a near optimal buffer size with no
                 page reaccess. The optimal page access sequence for a
                 fixed buffer size has also been considered.",
  acknowledgement = ack-nhfb,
  affiliation =  "Michigan State Univ, Computer Science Dep, East
                 Lansing, MI, USA",
  affiliationaddress = "Michigan State Univ, Computer Science Dep, East
                 Lansing, MI, USA",
  annote =       "buffer management for indexes.",
  classification = "723; 921",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Experimentation; Measurement; Performance;
                 Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- algorithms; database systems;
                 experimentation; graph-theoretic models, query
                 optimization processing TODS, algorithms; mathematical
                 techniques --- graph theory; measurement; performance;
                 relational database accesses; relational join; theory",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf G.2.2}: Mathematics of
                 Computing, DISCRETE MATHEMATICS, Graph Theory, Graph
                 algorithms.",
}

@Article{Larson:1985:LHO,
  author =       "Per-{\AA}ke Larson",
  title =        "Linear Hashing with Overflow-Handling by Linear
                 Probing",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "75--89",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-1/p75-larson/p75-larson.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-1/p75-larson/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3324.html",
  abstract =     "Linear hashing is a file structure for dynamic files.
                 In this paper, a new, simple method for handling
                 overflow records in connection with linear hashing is
                 proposed. The method is based on linear probing and
                 does not rely on chaining. No dedicated overflow area
                 is required. The expansion sequence of linear hashing
                 is modified to improve the performance, which requires
                 changes in the address computation. A new address
                 computation algorithm and an expansion algorithm are
                 given. The performance of the method is studied by
                 simulation. The algorithms for the basic file
                 operations are very simple, and the overall performance
                 is competitive with that of other variants of linear
                 hashing.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Waterloo, Dep of Computer Science, Waterloo,
                 Ont, Can",
  affiliationaddress = "Univ of Waterloo, Dep of Computer Science,
                 Waterloo, Ont, Can",
  annote =       "New algorithm for files that grow and shrink
                 dynamically; the overflow records of a full page are
                 directed to the next page of a group; the introduction
                 of five groups and the backwards split order makes this
                 algorithm better than previous ones.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- algorithms; data processing;
                 database systems; design; dynamic hashing; file
                 organization; linear hashing; measurement; open
                 addressing, algorithms; performance",
  review =       "ACM CR 8512-1134",
  subject =      "{\bf E.2}: Data, DATA STORAGE REPRESENTATIONS,
                 Hash-table representations. {\bf D.2.2}: Software,
                 SOFTWARE ENGINEERING, Tools and Techniques, Decision
                 tables. {\bf E.5}: Data, FILES,
                 Organization/structure.",
}

@Article{Veklerov:1985:ADH,
  author =       "Eugene Veklerov",
  title =        "Analysis of Dynamic Hashing with Deferred Splitting",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "90--96",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-1/p90-veklerov/p90-veklerov.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-1/p90-veklerov/",
  abstract =     "Dynamic hashing with deferred splitting is a file
                 organization scheme which increases storage
                 utilization, as compared to `standard' dynamic hashing.
                 In this scheme, splitting of a bucket is deferred if
                 the bucket is full but its brother can accommodate new
                 records. The performance of the scheme is analyzed. In
                 a typical case the expected storage utilization
                 increases from 69 to 76 percent.",
  acknowledgement = ack-nhfb,
  affiliation =  "Lawrence Berkeley Lab, Real Time Systems Group,
                 Berkeley, CA, USA",
  affiliationaddress = "Lawrence Berkeley Lab, Real Time Systems Group,
                 Berkeley, CA, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; database systems; DEFERRED SPLITTING;
                 DYNAMIC HASHING; File Organization; STORAGE
                 UTILIZATION",
}

@Article{Palvia:1985:EBS,
  author =       "Prashant Palvia",
  title =        "Expressions for Batched Searching of Sequential and
                 Hierarchical Files",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "97--106",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-1/p97-palvia/p97-palvia.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-1/p97-palvia/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3326.html",
  abstract =     "Batching yields significant savings in access costs in
                 sequential, tree-structured, and random files. A direct
                 and simple expression is developed for computing the
                 average number of records\slash pages accessed to
                 satisfy a batched query of a sequential file. The
                 advantages of batching for sequential and random files
                 are discussed. A direct equation is provided for the
                 number of nodes accessed in unbatched queries of
                 hierarchical files. An exact recursive expression is
                 developed for node accesses in batched queries of
                 hierarchical files. In addition to the recursive
                 relationship, good, closed-form upper- and lower-bound
                 approximations are provided for the case of batched
                 queries of hierarchical files.",
  acknowledgement = ack-nhfb,
  affiliation =  "Temple Univ, Dep of Computer \& Information Sciences,
                 Philadelphia, PA, USA",
  affiliationaddress = "Temple Univ, Dep of Computer \& Information
                 Sciences, Philadelphia, PA, USA",
  classification = "723; 901",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "batched searching; database systems; design;
                 hierarchical files; information science --- information
                 retrieval; sequential files, performance; theory",
  subject =      "{\bf E.5}: Data, FILES, Organization/structure. {\bf
                 H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods. {\bf H.3.3}:
                 Information Systems, INFORMATION STORAGE AND RETRIEVAL,
                 Information Search and Retrieval, Search process.",
}

@Article{Bever:1985:DHS,
  author =       "Martin Bever and Peter C. Lockemann",
  title =        "Database Hosting in Strongly-Typed Programming
                 Languages",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "107--126",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-1/p107-bever/p107-bever.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-1/p107-bever/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3327.html",
  abstract =     "Database system support has become an essential part
                 of many computer applications, which have extended
                 beyond the more traditional commercial applications to,
                 among others, engineering applications.
                 Correspondingly, application programming with the need
                 to access databases has progressively shifted to
                 scientifically oriented languages.\par

                 Modern developments in these languages are
                 characterized by advanced mechanisms for the liberal
                 declaration of data types, for type checking, and
                 facilities for modularization of large programs. The
                 present paper examines how a DBMS can be accessed from
                 such a language in a way that conforms to its syntax
                 and utilizes its type-checking facilities, without
                 modifying the language specification itself, and hence
                 its compilers. The basic idea is to rely on facilities
                 for defining modules as separately compilable units,
                 and to use these to declare user-defined abstract data
                 types.\par

                 The idea is demonstrated by an experiment in which a
                 specific DBMS (ADABAS) is hosted in the programming
                 language (LIS). The paper outlines a number of
                 approaches and their problems, shows how to embed the
                 DML into LIS, and how a more user-oriented DML can be
                 provided in LIS.",
  acknowledgement = ack-nhfb,
  acmcrnumber =  "8707-597",
  affiliation =  "Univ Karlsruhe, Inst fuer Informatik, Karlsruhe, West
                 Ger",
  affiliationaddress = "Univ Karlsruhe, Inst fuer Informatik, Karlsruhe,
                 West Ger",
  annote =       "ADABAS is the experimental target system and the
                 language is LIS.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Languages",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; database hosting,
                 design; database systems; languages; parameterized data
                 types; schema mapping; strongly-typed programming
                 languages",
  subject =      "{\bf H.2.3}: Information Systems, DATABASE MANAGEMENT,
                 Languages, Data manipulation languages (DML). {\bf
                 D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Abstract data types. {\bf
                 D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Data types and structures.
                 {\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.1}: Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Schema
                 and subschema. {\bf H.2.3}: Information Systems,
                 DATABASE MANAGEMENT, Languages, Data description
                 languages (DDL).",
}

@Article{Chen:1985:AAS,
  author =       "Wen Chin Chen and Jeffrey Scott Vitter",
  title =        "Addendum to: {``Analysis of Some New Variants of
                 Coalesced Hashing''} [{ACM} Trans. Database Systems
                 {\bf 9} (1984), no. 4, 616--645]",
  journal =      j-TODS,
  volume =       "10",
  number =       "1",
  pages =        "127--127",
  month =        mar,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P10",
  MRnumber =     "794 552",
  bibsource =    "Database/Graefe.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
}

@Article{Vitter:1985:EIO,
  author =       "Jeffrey Scott Vitter",
  title =        "An Efficient {I/O} Interface for Optical Disks",
  journal =      j-TODS,
  volume =       "10",
  number =       "2",
  pages =        "129--162",
  month =        jun,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-2/p129-vitter/p129-vitter.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-2/p129-vitter/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3862.html",
  abstract =     "We introduce the notion of an I/O interface for
                 optical digital (write-once) disks, which is quite
                 different from earlier research. The purpose of an I/O
                 interface is to allow existing operating systems and
                 application programs that use magnetic disks to use
                 optical disks instead, with minimal change. We define
                 what it means for an I/O interface to be
                 disk-efficient. We demonstrate a practical
                 disk-efficient I/O interface and show that its I/O
                 performance in many cases is optimum, up to a constant
                 factor, among all disk-efficient interfaces. The
                 interface is most effective for applications that are
                 not update-intensive. An additional capability is a
                 built-in history mechanism that provides software
                 support for accessing previous versions of records.
                 Even if not implemented, the I/O interface can be used
                 as a programming tool to develop efficient special
                 purpose applications for use with optical disks.",
  acknowledgement = ack-nhfb,
  affiliation =  "Brown Univ, Dep of Computer Science, Providence, RI,
                 USA",
  affiliationaddress = "Brown Univ, Dep of Computer Science, Providence,
                 RI, USA",
  annote =       "An I/O interface that supports basic update
                 operations, such as insert, write, and delete on a
                 block, is proposed. Index techniques for erasable
                 media (a B-tree is assumed in this paper) can be
                 implemented on this interface. Versions of a block are
                 stored as an allocation tree on an optical disk, which
                 is an efficient implementation of the pointer fill-in
                 method. The contents of a version of a block are
                 represented by an offset tree. Theoretical lower
                 bounds of these operations are evaluated. This paper
                 assumes that appending to an existing block is
                 possible on an optical disk.",
  classification = "722; 741",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer interfaces; data storage, optical; design;
                 I/O interface; optical disks, algorithms; performance;
                 theory",
  subject =      "{\bf D.4.2}: Software, OPERATING SYSTEMS, Storage
                 Management, Secondary storage. {\bf D.4.2}: Software,
                 OPERATING SYSTEMS, Storage Management,
                 Allocation/deallocation strategies. {\bf D.4.3}:
                 Software, OPERATING SYSTEMS, File Systems Management,
                 Access methods. {\bf D.4.3}: Software, OPERATING
                 SYSTEMS, File Systems Management, File organization.
                 {\bf E.1}: Data, DATA STRUCTURES, Trees. {\bf E.2}:
                 Data, DATA STORAGE REPRESENTATIONS, Hash-table
                 representations. {\bf E.2}: Data, DATA STORAGE
                 REPRESENTATIONS, Linked representations. {\bf F.2.2}:
                 Theory of Computation, ANALYSIS OF ALGORITHMS AND
                 PROBLEM COMPLEXITY, Nonnumerical Algorithms and
                 Problems, Sorting and searching. {\bf G.2.1}:
                 Mathematics of Computing, DISCRETE MATHEMATICS,
                 Combinatorics, Combinatorial algorithms. {\bf H.2.2}:
                 Information Systems, DATABASE MANAGEMENT, Physical
                 Design, Access methods.",
}

@Article{Schkolnick:1985:ECU,
  author =       "M. Schkolnick and P. Tiberio",
  title =        "Estimating the Cost of Updates in a Relational
                 Database",
  journal =      j-TODS,
  volume =       "10",
  number =       "2",
  pages =        "163--179",
  month =        jun,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-2/p163-schkolnick/p163-schkolnick.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-2/p163-schkolnick/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3863.html",
  abstract =     "In this paper, cost formulas are derived for the
                 updates of data and indexes in a relational database.
                 The costs depend on the data scan type and the
                 predicates involved in the update statements. We show
                 that update costs have a considerable influence, both
                 in the context of the physical database design problem
                 and in access path selection in query optimization for
                 relational DBMSs.",
  acknowledgement = ack-nhfb,
  affiliation =  "IBM Research Lab, San Jose, CA, USA",
  affiliationaddress = "IBM Research Lab, San Jose, CA, USA",
  annote =       "tradeoff by a given index query cost against update
                 cost.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "costs; database systems; measurement; performance;
                 query optimization; relational databases; update costs,
                 design",
  subject =      "{\bf H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Query processing.",
}

@Article{Yu:1985:ARC,
  author =       "C. T. Yu and Cheing-Mei Suen and K. Lam and M. K.
                 Siu",
  title =        "Adaptive Record Clustering",
  journal =      j-TODS,
  volume =       "10",
  number =       "2",
  pages =        "180--204",
  month =        jun,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-2/p180-yu/p180-yu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-2/p180-yu/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3861.html",
  abstract =     "An algorithm for record clustering is presented. It is
                 capable of detecting sudden changes in users' access
                 patterns and then suggesting an appropriate assignment
                 of records to blocks. It is conceptually simple, highly
                 intuitive, does not need to classify queries into
                 types, and avoids collecting individual query
                 statistics. Experimental results indicate that it
                 converges rapidly; its performance is about 50 percent
                 better than that of the total sort method, and about
                 100 percent better than that of randomly assigning
                 records to blocks.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Illinois at Chicago Circle, Dep of Electrical
                 Engineering \& Computer Science, Chicago, IL, USA",
  affiliationaddress = "Univ of Illinois at Chicago Circle, Dep of
                 Electrical Engineering \& Computer Science, Chicago,
                 IL, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Experimentation; Measurement; Performance;
                 Theory; Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "adaptive record clustering; algorithms; computer
                 programming --- algorithms; database systems;
                 experimentation; file organization; measurement;
                 performance; physical database design; probabilistic
                 retrieval, CTYU TODS; theory; verification, data
                 processing",
  subject =      "{\bf H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design. {\bf E.5}: Data, FILES,
                 Organization/structure. {\bf H.2.m}: Information
                 Systems, DATABASE MANAGEMENT, Miscellaneous. {\bf
                 H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods.",
}

@Article{Katoh:1985:CTS,
  author =       "Naoki Katoh and Toshihide Ibaraki and Tiko Kameda",
  title =        "Cautious Transaction Schedulers with Admission
                 Control",
  journal =      j-TODS,
  volume =       "10",
  number =       "2",
  pages =        "205--229",
  month =        jun,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-2/p205-katoh/p205-katoh.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-2/p205-katoh/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3860.html",
  abstract =     "We propose a new class of schedulers, called {\em
                 cautious schedulers}, that grant an input request if it
                 will not necessitate any rollback in the future. In
                 particular, we investigate cautious WRW-schedulers that
                 output schedules in class WRW only. Class WRW consists
                 of all schedules that are serializable, while
                 preserving the write-read and read-write conflict, and
                 is the largest polynomially {\em recognizable\/}
                 subclass of serializable schedules currently known. It
                 is shown in this paper, however, that cautious
                 {\em WRW-scheduling\/} is, in general, NP-complete.
                 Therefore, we introduce a special type ({\em type
                 1R\/}) of transaction, which consists of no more than
                 one read step (an indivisible set of read operations)
                 followed by multiple write steps. It is shown that
                 cautious WRW-scheduling can be performed efficiently if
                 all transactions are of type 1R and if {\em admission
                 control\/} can be exercised. Admission control rejects
                 a transaction unless its first request is immediately
                 grantable.",
  acknowledgement = ack-nhfb,
  affiliation =  "Kobe Univ of Commerce, Dep of Management Science,
                 Kobe, Japan",
  affiliationaddress = "Kobe Univ of Commerce, Dep of Management
                 Science, Kobe, Japan",
  annote =       "serializability control for predefined transaction
                 sequences.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; database systems; scheduling;
                 serializability; transaction scheduler, algorithms",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing.",
}

@Article{Albano:1985:GST,
  author =       "Antonio Albano and Luca Cardelli and Renzo Orsini",
  title =        "{Galileo}: a Strongly-Typed, Interactive Conceptual
                 Language",
  journal =      j-TODS,
  volume =       "10",
  number =       "2",
  pages =        "230--260",
  month =        jun,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/Functional.bib; Object/Nierstrasz.bib",
  note =         "Also published in \cite{Zdonik:1990:ROO}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-2/p230-albano/p230-albano.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-2/p230-albano/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3859.html",
  abstract =     "Galileo, a programming language for database
                 applications, is presented. Galileo is a
                 strongly-typed, interactive programming language
                 designed specifically to support semantic data model
                 features (classification, aggregation, and
                 specialization), as well as the abstraction mechanisms
                 of modern programming languages (types, abstract types,
                 and modularization). The main contributions of Galileo
                 are (a) a flexible type system to model database
                 structure and semantic integrity constraints; (b) the
                 inclusion of type hierarchies to support the
                 specialization abstraction mechanisms of semantic data
                 models; (c) a modularization mechanism to structure
                 data and operations into interrelated units; (d) the
                 integration of abstraction mechanisms into an
                 expression-based language that allows interactive use
                 of the database without resorting to a new stand-alone
                 query language.\par

                 Galileo will be used in the immediate future as a tool
                 for database design and, in the long term, as a
                 high-level interface for DBMSs.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ di Pisa, Dipartimento di Informatica, Pisa,
                 Italy",
  affiliationaddress = "Univ di Pisa, Dipartimento di Informatica,
                 Pisa, Italy",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Languages",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; data description
                 languages; data manipulation; database systems;
                 functional abstract data types; Galileo; languages;
                 olit-oopl Galileo; query languages, design",
  subject =      "{\bf D.3.2}: Software, PROGRAMMING LANGUAGES, Language
                 Classifications, GALILEO. {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Query
                 languages. {\bf H.2.3}: Information Systems, DATABASE
                 MANAGEMENT, Languages, Data description languages
                 (DDL). {\bf H.2.3}: Information Systems, DATABASE
                 MANAGEMENT, Languages, Data manipulation languages
                 (DML). {\bf D.3.3}: Software, PROGRAMMING LANGUAGES,
                 Language Constructs and Features, Abstract data types.
                 {\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Data types and structures.
                 {\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.1}: Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Schema
                 and subschema.",
}

@Article{Huang:1985:HBT,
  author =       "Shou-Hsuan Stephen Huang",
  title =        "Height-balanced Trees of Order $ (\beta, \gamma,
                 \delta) $",
  journal =      j-TODS,
  volume =       "10",
  number =       "2",
  pages =        "261--284",
  month =        jun,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P05",
  MRnumber =     "801 578",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-2/p261-huang/p261-huang.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-2/p261-huang/",
  abstract =     "We study restricted classes of B-trees, called $
                 H(\beta, \gamma, \delta) $ trees. A class is defined by
                 three parameters: $ \beta $, the size of a node; $
                 \gamma $, the minimal number of grandsons a node must
                 have; and $ \delta $, the minimal number of leaves
                 bottom nodes must have. This generalizes the brother
                 condition of 2-3 brother trees in a uniform way to
                 B-trees of higher order. The class of B-trees of order
                 m is obtained by choosing $ \beta = m $, $ \gamma = (m
                 / 2)^2 $, and $ \delta = m / 2 $. An algorithm to
                 construct H-trees for any given number of keys is given
                 in Section 1. Insertion and deletion algorithms are
                 given in Section 2. The costs of these algorithms
                 increase smoothly as the parameters are increased.
                 Furthermore, it is proved that the insertion can be
                 done in time $ O(\gamma + \log N) $, where $N$ is the
                 number of nodes in the tree. Deletion can also be
                 accomplished without reconstructing the entire tree.
                 Properties of H-trees are given in Section 3. It is
                 shown that the height of H-trees decreases as
                 $ \gamma $ increases, and the storage utilization
                 increases significantly as $ \delta $ increases.
                 Finally, comparisons with
                 other restricted classes of B-trees are given in
                 Section 4 to show the attractiveness of H-trees.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Houston, Dep of Computer Science, Houston, TX,
                 USA",
  affiliationaddress = "Univ of Houston, Dep of Computer Science,
                 Houston, TX, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; b-trees; compact b-trees; computer
                 programming --- algorithms; data processing; data
                 structures; dense multiway trees; height-balanced
                 trees; performance",
  subject =      "{\bf E.1}: Data, DATA STRUCTURES, Trees. {\bf H.2.2}:
                 Information Systems, DATABASE MANAGEMENT, Physical
                 Design, Access methods.",
}

@Article{Piwowarski:1985:CBS,
  author =       "Marek Piwowarski",
  title =        "Comments on Batched Searching of Sequential and
                 Tree-Structured Files",
  journal =      j-TODS,
  volume =       "10",
  number =       "2",
  pages =        "285--287",
  month =        jun,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "See \cite{Shneiderman:1976:BSS,Batory:1982:UMP}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-2/p285-piwowarski/p285-piwowarski.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-2/p285-piwowarski/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/214294.html",
  abstract =     "Exact formulas for the expected cost savings from
                 batching requests against two types of j-ary trees are
                 given. Approximate expressions are also presented.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "performance",
  subject =      "{\bf H.3.2}: Information Systems, INFORMATION STORAGE
                 AND RETRIEVAL, Information Storage, File organization.
                 {\bf E.1}: Data, DATA STRUCTURES, Trees. {\bf F.2.2}:
                 Theory of Computation, ANALYSIS OF ALGORITHMS AND
                 PROBLEM COMPLEXITY, Nonnumerical Algorithms and
                 Problems, Sorting and searching.",
}

@Article{Ullman:1985:ILQ,
  author =       "Jeffrey D. Ullman",
  title =        "Implementation of Logical Query Languages for
                 Databases",
  journal =      j-TODS,
  volume =       "10",
  number =       "3",
  pages =        "289--321",
  month =        sep,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Ai/nonmono.bib; Ai/prolog.1.bib; Compendex database;
                 Database/Graefe.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Sep., YEAR $=$ 1985",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-3/p289-ullman/p289-ullman.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-3/p289-ullman/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/3980.html",
  abstract =     "We examine methods of implementing queries about
                 relational databases in the case where these queries
                 are expressed in first-order logic as a collection of
                 Horn clauses. Because queries may be defined
                 recursively, straightforward methods of query
                 evaluation do not always work, and a variety of
                 strategies have been proposed to handle subsets of
                 recursive queries. We express such query evaluation
                 techniques as ``capture rules'' on a graph representing
                 clauses and predicates. One essential property of
                 capture rules is that they can be applied
                 independently, thus providing a clean interface for
                 query-evaluation systems that use several different
                 strategies in different situations. Another is that
                 there be an efficient test for the applicability of a
                 given rule. We define basic capture rules corresponding
                 to application of operators from relational algebra, a
                 top-down capture rule corresponding to ``backward
                 chaining,'' that is, repeated resolution of goals, a
                 bottom-up rule, corresponding to ``forward chaining,''
                 where we attempt to deduce all true facts in a given
                 class, and a ``sideways'' rule that allows us to pass
                 results from one goal to another.",
  acknowledgement = ack-nhfb,
  affiliation =  "Stanford Univ, Dep of Computer Science, Stanford, CA,
                 USA",
  affiliationaddress = "Stanford Univ, Dep of Computer Science,
                 Stanford, CA, USA",
  classification = "723",
  conference =   "Sel Pap from the 1985 ACM SIGMOD Conf",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Languages; Theory; Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; database systems; Horn
                 clauses; languages; logical query languages; relational
                 databases, Prolog, algorithms; theory; verification",
  meetingaddress = "Austin, TX, USA",
  meetingdate =  "May 28--31 1985",
  meetingdate2 = "05/28--31/85",
  subject =      "{\bf H.2.3}: Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf I.2.3}: Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Logic programming. {\bf I.2.4}:
                 Computing Methodologies, ARTIFICIAL INTELLIGENCE,
                 Knowledge Representation Formalisms and Methods,
                 Predicate logic.",
}

@Article{Anonymous:1985:SPA,
  author =       "Anonymous",
  title =        "Selected Papers from the 1985 {ACM SIGMOD
                 Conference}",
  journal =      j-TODS,
  volume =       "10",
  number =       "3",
  pages =        "289--346",
  month =        sep,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Nov 10 07:59:49 1998",
  bibsource =    "Compendex database; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  abstract =     "This issue contains 2 conference papers. The topics
                 covered are: logical query languages for databases; and
                 modeling concepts for VLSI CAD objects.",
  acknowledgement = ack-nhfb,
  classification = "714; 723",
  conference =   "Selected Papers from the 1985 ACM SIGMOD Conference.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  journalabr =   "ACM Transactions on Database Systems",
  keywords =     "CAD; database systems; design automation; integrated
                 circuits, VLSI --- computer aided design; logical query
                 languages; relational databases",
  meetingaddress = "Austin, TX, USA",
  sponsor =      "ACM, Special Interest Group on Management of Data, New
                 York, NY, USA",
}

@Article{Batory:1985:MCV,
  author =       "D. S. Batory and Won Kim",
  title =        "Modeling Concepts for {VLSI CAD} Objects",
  journal =      j-TODS,
  volume =       "10",
  number =       "3",
  pages =        "322--346",
  month =        sep,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: ACM-SIGMOD 1985.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-3/p322-batory/p322-batory.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-3/p322-batory/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/4018.html",
  abstract =     "VLSI CAD applications deal with design objects that
                 have an interface description and an implementation
                 description. Versions of design objects have a common
                 interface but differ in their implementations. A
                 molecular object is a modeling construct which enables
                 a database entity to be represented by two sets of
                 heterogeneous records, one set describes the object's
                 interface and the other describes its implementation.
                 Thus a reasonable starting point for modeling design
                 objects is to begin with the concept of molecular
                 objects.\par

                 In this paper, we identify modeling concepts that are
                 fundamental to capturing the semantics of VLSI CAD
                 design objects and versions in terms of molecular
                 objects. A provisional set of user operations on design
                 objects, consistent with these modeling concepts, is
                 also defined. The modeling framework that we present
                 has been found useful for investigating physical
                 storage techniques and change notification problems in
                 version control.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Texas at Austin, Dep of Computer Sciences,
                 Austin, TX, USA",
  affiliationaddress = "Univ of Texas at Austin, Dep of Computer
                 Sciences, Austin, TX, USA",
  classification = "714; 723",
  conference =   "Sel Pap from the 1985 ACM SIGMOD Conf",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Languages",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "CAD; Computer Aided Design; database systems; design
                 automation; integrated circuits, VLSI; languages;
                 relational databases; storage techniques, design;
                 version control",
  meetingaddress = "Austin, TX, USA",
  meetingdate =  "May 28--31 1985",
  meetingdate2 = "05/28--31/85",
  subject =      "{\bf B.7.1}: Hardware, INTEGRATED CIRCUITS, Types and
                 Design Styles, VLSI (very large scale integration).",
}

@Article{Subieta:1985:SQL,
  author =       "Kazimierz Subieta",
  title =        "Semantics of Query Languages for Network Databases",
  journal =      j-TODS,
  volume =       "10",
  number =       "3",
  pages =        "347--394",
  month =        sep,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-3/p347-subieta/p347-subieta.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-3/p347-subieta/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/214293.html",
  abstract =     "Semantics determines the meaning of language
                 constructs; hence it says much more than syntax does
                 about implementing the language. The main purpose of
                 this paper is a formal presentation of the meaning of
                 basic language constructs employed in many database
                 languages (sublanguages). Therefore, stylized query
                 languages SSL (Sample Selection Language) and J (Joins)
                 are introduced, wherein most of the typical entries
                 present in other query languages are collected. The
                 semantics of SSL and J are defined by means of the
                 denotational method and explained informally. In SSL
                 and J, four types of expressions are introduced: a
                 selector (denotes a set of addresses), a term (denotes
                 a set of values), a formula (denotes a truth value),
                 and a join (denotes a set of n-tuples of addresses or
                 values). In many cases alternative semantics are given
                 and discussed. In order to obtain more general
                 properties of the proposed languages, a new database
                 access model is introduced, intended to be a tool for
                 the description of the logical access paths to data. In
                 particular, the access paths of the network and
                 relational models can be described. SSL and J
                 expressions may be addressed to both data structures.
                 In the case of the relational model, expressions of J
                 are similar to SQL or QUEL statements. Thus J may be
                 considered a generalization of relational query
                 languages for the network model. Finally, a programming
                 language, based on SSL and J, is outlined, and the
                 issues of SSL and J implementation are considered.",
  acknowledgement = ack-nhfb,
  affiliation =  "Polish Acad of Sciences, Inst of Computer Science,
                 Warsaw, Pol",
  affiliationaddress = "Polish Acad of Sciences, Inst of Computer
                 Science, Warsaw, Pol",
  classification = "723",
  conference =   "Sel Pap from the 1985 ACM SIGMOD Conf",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Languages; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; data manipulation
                 languages; database systems; denotational semantics;
                 query languages; query optimization, languages;
                 theory",
  meetingaddress = "Austin, TX, USA",
  meetingdate =  "May 28--31 1985",
  meetingdate2 = "05/28--31/85",
  subject =      "{\bf H.2.3}: Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Data
                 manipulation languages (DML).",
}

@Article{Liew:1985:DDP,
  author =       "Chong K. Liew and Uinam J. Choi and Chung J. Liew",
  title =        "A Data Distortion by Probability Distribution",
  journal =      j-TODS,
  volume =       "10",
  number =       "3",
  pages =        "395--411",
  month =        sep,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-3/p395-liew/p395-liew.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-3/p395-liew/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/4017.html",
  abstract =     "This paper introduces data distortion by probability
                 distribution, a probability distortion that involves
                 three steps. The first step is to identify the
                 underlying density function of the original series and
                 to estimate the parameters of this density function.
                 The second step is to generate a series of data from
                 the estimated density function. And the final step is
                 to map and replace the generated series for the
                 original one. Because it is replaced by the distorted
                 data set, probability distortion guards the privacy of
                 an individual belonging to the original data set. At
                 the same time, the probability distorted series
                 provides asymptotically the same statistical properties
                 as those of the original series, since both are under
                 the same distribution. Unlike conventional point
                 distortion, probability distortion is difficult to
                 compromise by repeated queries, and provides a maximum
                 exposure for statistical analysis.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Oklahoma, Norman, OK, USA",
  affiliationaddress = "Univ of Oklahoma, Norman, OK, USA",
  annote =       "analysis of pollution technique.",
  classification = "723",
  conference =   "Sel Pap from the 1985 ACM SIGMOD Conf",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Security",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data distortion; database systems; probability;
                 probability distortion; security; statistical
                 databases, statistical security; TODS, algorithms",
  meetingaddress = "Austin, TX, USA",
  meetingdate =  "May 28--31 1985",
  meetingdate2 = "05/28--31/85",
  subject =      "{\bf H.2.0}: Information Systems, DATABASE MANAGEMENT,
                 General, Security, integrity, and protection. {\bf
                 H.2.7}: Information Systems, DATABASE MANAGEMENT,
                 Database Administration. {\bf G.3}: Mathematics of
                 Computing, PROBABILITY AND STATISTICS, Statistical
                 computing. {\bf G.3}: Mathematics of Computing,
                 PROBABILITY AND STATISTICS.",
}

@Article{Tay:1985:LPC,
  author =       "Y. C. Tay and Nathan Goodman and Rajan Suri",
  title =        "Locking Performance in Centralized Databases",
  journal =      j-TODS,
  volume =       "10",
  number =       "4",
  pages =        "415--462",
  month =        dec,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-4/p415-tay/p415-tay.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-4/p415-tay/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/4880.html",
  abstract =     "An analytic model is used to study the performance of
                 dynamic locking. The analysis uses only the
                 steady-state average values of the variables. The
                 solution to the model is given by a cubic, which has
                 exactly one valid root for the range of parametric
                 values that is of interest. The model's predictions
                 agree well with simulation results for transactions
                 that require up to twenty locks. The model separates
                 data contention from resource contention, thus
                 facilitating an analysis of their separate effects and
                 their interaction. It shows that systems with a
                 particular form of nonuniform access, or with shared
                 locks, are equivalent to systems with uniform access
                 and only exclusive locks.\par

                 Blocking due to conflicts is found to impose an upper
                 bound on transaction throughput; this fact leads to a
                 rule of thumb on how much data contention should be
                 permitted in a system. Throughput can exceed this bound
                 if a transaction is restarted whenever it encounters a
                 conflict, provided restart costs and resource
                 contention are low. It can also be exceeded by making
                 transactions predeclare their locks. Raising the
                 multiprogramming level to increase throughput also
                 raises the number of restarts per completion.
                 Transactions should minimize their lock requests,
                 because data contention is proportional to the square
                 of the number of requests. The choice of how much data
                 to lock at a time depends on which part of a general
                 granularity curve the system sees.",
  acknowledgement = ack-nhfb,
  affiliation =  "Natl Univ of Singapore, Singapore, Singapore",
  affiliationaddress = "Natl Univ of Singapore, Singapore, Singapore",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Measurement; Performance; Theory;
                 Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; data contention; database
                 locking; database systems; measurement; performance;
                 resource contention, algorithms; theory; verification",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing. {\bf H.2.2}:
                 Information Systems, DATABASE MANAGEMENT, Physical
                 Design, Deadlock avoidance. {\bf C.4}: Computer Systems
                 Organization, PERFORMANCE OF SYSTEMS, Modeling
                 techniques. {\bf C.2.2}: Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Network Protocols.
                 {\bf C.2.1}: Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Network Architecture
                 and Design, Centralized networks.",
}

@Article{Batory:1985:MSA,
  author =       "D. S. Batory",
  title =        "Modeling the Storage Architectures of Commercial
                 Database Systems",
  journal =      j-TODS,
  volume =       "10",
  number =       "4",
  pages =        "463--528",
  month =        dec,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-4/p463-batory/p463-batory.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-4/p463-batory/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5392.html",
  abstract =     "Modeling the storage structures of a DBMS is a
                 prerequisite to understanding and optimizing database
                 performance. Previously, such modeling was very
                 difficult because the fundamental role of
                 conceptual-to-internal mappings in DBMS implementations
                 went unrecognized.\par

                 In this paper we present a model of physical databases,
                 called the transformation model, that makes
                 conceptual-to-internal mappings explicit. By exposing
                 such mappings, we show that it is possible to model the
                 storage architectures (i.e., the storage structures and
                 mappings) of many commercial DBMSs in a precise,
                 systematic, and comprehensible way. Models of the
                 INQUIRE, ADABAS, and SYSTEM 2000 storage architectures
                 are presented as examples of the model's utility.
                 \par

                 We believe the transformation model helps bridge the
                 gap between physical database theory and practice. It
                 also reveals the possibility of a technology to
                 automate the development of physical database
                 software.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Texas at Austin, Austin, TX, USA",
  affiliationaddress = "Univ of Texas at Austin, Austin, TX, USA",
  annote =       "considers ADABAS, INQUIRE, SYSTEM2000 in depth.
                 Classification of linksets. modeling storage methods of
                 Inquire, ADABAS, and System 2000.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Documentation",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing --- data structures; database systems;
                 documentation; storage architectures, design",
  subject =      "{\bf H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods. {\bf E.5}: Data,
                 FILES, Organization/structure. {\bf H.2.m}: Information
                 Systems, DATABASE MANAGEMENT, Miscellaneous.",
}

@Article{Agrawal:1985:ICC,
  author =       "Rakesh Agrawal and David J. Dewitt",
  title =        "Integrated Concurrency Control and Recovery
                 Mechanisms: Design and Performance Evaluation",
  journal =      j-TODS,
  volume =       "10",
  number =       "4",
  pages =        "529--564",
  month =        dec,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-4/p529-agrawal/p529-agrawal.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-4/p529-agrawal/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/4958.html",
  abstract =     "In spite of the wide variety of concurrency control
                 and recovery mechanisms proposed during the past
                 decade, the behavior and the performance of various
                 concurrency control and recovery mechanisms remain
                 largely not well understood. In addition, although
                 concurrency control and recovery mechanisms are
                 intimately related, the interaction between them has
                 not been adequately explored. In this paper, we take a
                 unified view of the problems associated with
                 concurrency control and recovery for
                 transaction-oriented multiuser centralized database
                 management systems, and we present several integrated
                 mechanisms. We then develop analytical models to study
                 the behavior and compare the performance of these
                 integrated mechanisms, and we present the results of
                 our performance evaluation.",
  acknowledgement = ack-nhfb,
  affiliation =  "AT\&T Bell Lab, Murray Hill, NJ, USA",
  affiliationaddress = "AT\&T Bell Lab, Murray Hill, NJ, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control; database systems; design;
                 measurement; performance; recovery mechanisms;
                 transaction processing, algorithms",
  subject =      "{\bf H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Recovery and restart. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems. {\bf
                 H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Deadlock avoidance. {\bf D.4.1}:
                 Software, OPERATING SYSTEMS, Process Management,
                 Concurrency. {\bf D.4.3}: Software, OPERATING SYSTEMS,
                 File Systems Management.",
}

@Article{Borgida:1985:LFF,
  author =       "Alexander Borgida",
  title =        "Language Features for Flexible Handling of Exceptions
                 in Information Systems",
  journal =      j-TODS,
  volume =       "10",
  number =       "4",
  pages =        "565--603",
  month =        dec,
  year =         "1985",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/database.bib;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: Rutgers Un., TR-LCSR-70, rev.
                 Mar. 1985.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1985-10-4/p565-borgida/p565-borgida.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1985-10-4/p565-borgida/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/4995.html",
  abstract =     "An exception-handling facility suitable for languages
                 used to implement database-intensive information
                 systems is presented. Such a mechanism facilitates the
                 development and maintenance of more flexible software
                 systems by supporting the abstraction of details
                 concerning special or abnormal occurrences. The type
                 constraints imposed by the schema as well as various
                 semantic integrity assertions are considered to be
                 normalcy conditions, and the key contribution of this
                 work is to allow exceptions to these constraints to
                 persist. To achieve this, solutions are proposed to a
                 range of problems, including sharing and computing with
                 exceptional information, exception handling by users,
                 the logic of constraints with exceptions, and
                 implementation issues. The use of exception handling in
                 dealing with null values, estimates, and measurement is
                 also illustrated.",
  acknowledgement = ack-nhfb,
  affiliation =  "Rutgers Univ, Dep of Computer Science, New Brunswick,
                 NJ, USA",
  affiliationaddress = "Rutgers Univ, Dep of Computer Science, New
                 Brunswick, NJ, USA",
  annote =       "Adding exception handling to database systems to deal
                 with unusual, unknown, or otherwise exceptional
                 attribute values. A semantic extension that may inspire
                 KBMSers. I have a report in my office on the
                 possibilities of this approach, by Alex Borgida of
                 Rutgers. It's very readable, and it may inspire someone
                 to cook up such a scheme of his or her own for Naxos,
                 thesis, or whatever. -----Marianne W. W.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Languages; Theory; Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; database systems;
                 exception handling; languages; semantic integrity;
                 theory; type constraints, design; verification",
  subject =      "{\bf D.2.5}: Software, SOFTWARE ENGINEERING, Testing
                 and Debugging, Error handling and recovery. {\bf
                 H.2.0}: Information Systems, DATABASE MANAGEMENT,
                 General, Security, integrity, and protection. {\bf
                 H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Data
                 description languages (DDL). {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Data
                 manipulation languages (DML).",
}

@Article{Hagmann:1986:PAS,
  author =       "Robert Brian Hagmann and Domenico Ferrari",
  title =        "Performance Analysis of Several Back-End Database
                 Architectures",
  journal =      j-TODS,
  volume =       "11",
  number =       "1",
  pages =        "1--26",
  month =        mar,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-1/p1-hagmann/p1-hagmann.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-1/p1-hagmann/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5242.html",
  abstract =     "The growing acceptance of database systems makes their
                 performance increasingly more important. One way to
                 gain performance is to off-load some of the functions
                 of the database system to a back-end computer. The
                 problem is what functions should be off-loaded to
                 maximize the benefits of distributed processing.
                 \par

                 Our approach to this problem consisted of constructing
                 several variants of an existing relational database
                 system. INGRES, that partition the database system
                 software into two parts, and assigning these two parts
                 to two computers connected by a local area network. For
                 the purposes of this experiment, six different variants
                 of the database software were constructed to test the
                 six most interesting functional subdivisions. Each
                 variant was then benchmarked using two different
                 databases and query streams. The communication medium
                 and the communication software were also benchmarked to
                 measure their contribution to the performance of each
                 configuration.\par

                 Combining the database and network measurement results,
                 various conclusions were reached about the viability of
                 the configurations, the desirable properties of the
                 communications mechanisms to be used, the operating
                 system interface and overhead, and the performance of
                 the database system. The variants to be preferred
                 depend on the hardware technology, operating system
                 features, database system internal structure, and
                 network software overhead.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of California, Berkeley, CA, USA",
  affiliationaddress = "Univ of California, Berkeley, CA, USA",
  annote =       "an experimental methodology using INGRES.",
  classification = "722; 723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Experimentation; Measurement; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "back-end database architectures; computer networks ---
                 local networks; computer systems, digital ---
                 distributed; database systems; experimentation; Ingres
                 database system; measurement; performance; relational
                 databases, hardware support database machine TODS,
                 design",
  subject =      "{\bf H.2.6}: Information Systems, DATABASE MANAGEMENT,
                 Database Machines. {\bf C.2.4}: Computer Systems
                 Organization, COMPUTER-COMMUNICATION NETWORKS,
                 Distributed Systems. {\bf C.4}: Computer Systems
                 Organization, PERFORMANCE OF SYSTEMS. {\bf H.2.0}:
                 Information Systems, DATABASE MANAGEMENT, General,
                 INGRES.",
}

@Article{Garcia-Molina:1986:ABA,
  author =       "H{\'e}ctor Garc{\'\i}a-Molina and Frank Pittelli and
                 Susan Davidson",
  title =        "Applications of {Byzantine} Agreement in Database
                 Systems",
  journal =      j-TODS,
  volume =       "11",
  number =       "1",
  pages =        "27--47",
  month =        mar,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/contents/journals/tods/;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-1/p27-molina/p27-molina.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-1/p27-molina/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5243.html",
  abstract =     "In this paper we study when and how a Byzantine
                 agreement protocol can be used in general-purpose
                 database management systems. We present an overview of
                 the failure model used for Byzantine agreement, and of
                 the protocol itself. We then present correctness
                 criteria for database processing in this failure
                 environment and discuss strategies for satisfying them.
                 In doing this, we present new failure models for
                 input\slash output nodes and study ways to distribute
                 input transactions to processing nodes under these
                 models. Finally, we investigate applications of
                 Byzantine agreement protocols in the more common
                 failure environment where processors are assumed to
                 halt after a failure.",
  acknowledgement = ack-nhfb,
  affiliation =  "Princeton Univ, Princeton, NJ, USA",
  affiliationaddress = "Princeton Univ, Princeton, NJ, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Reliability",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; Byzantine agreement protocol; data
                 processing; database systems; distributed; failure
                 models; fault tolerance; reliability",
  subject =      "{\bf D.4.5}: Software, OPERATING SYSTEMS, Reliability,
                 Fault-tolerance. {\bf H.2.4}: Information Systems,
                 DATABASE MANAGEMENT, Systems, Distributed systems. {\bf
                 C.2.2}: Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Network Protocols.
                 {\bf C.2.4}: Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Distributed Systems,
                 Distributed databases.",
}

@Article{Segev:1986:OJO,
  author =       "Arie Segev",
  title =        "Optimization of Join Operations in Horizontally
                 Partitioned Database Systems",
  journal =      j-TODS,
  volume =       "11",
  number =       "1",
  pages =        "48--80",
  month =        mar,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-1/p48-segev/p48-segev.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-1/p48-segev/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5241.html",
  abstract =     "This paper analyzes the problem of joining two
                 horizontally partitioned relations in a distributed
                 database system. Two types of semijoin strategies are
                 introduced, local and remote. Local semijoins are
                 performed at the site of the restricted relation (or
                 fragment), and remote semijoins can be performed at an
                 arbitrary site. A mathematical model of a semijoin
                 strategy for the case of remote semijoins is developed,
                 and lower bounding and heuristic procedures are
                 proposed. The results of computational experiments are
                 reported. The experiments include an analysis of the
                 heuristics' performance relative to the lower bounds,
                 sensitivity analysis, and error analysis. These results
                 reveal a good performance of the heuristic procedures,
                 and demonstrate the benefit of using semijoin
                 operations to reduce the size of fragments prior to
                 their transmission. The algorithms for the case of
                 remote semijoins were found to be superior to the
                 algorithms for the case of local semijoins. In
                 addition, we found that the estimation accuracy of the
                 selectivity factors has a significant effect on the
                 incurred communication cost.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of California, Berkeley, CA, USA",
  affiliationaddress = "Univ of California, Berkeley, CA, USA",
  classification = "723; 921",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- algorithms; database systems;
                 distributed; horizontally partitioned database systems,
                 query processing optimization tods; join operations;
                 mathematical models; optimization",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf G.2.2}: Mathematics of
                 Computing, DISCRETE MATHEMATICS, Graph Theory, Trees.
                 {\bf C.2.4}: Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Distributed Systems,
                 Distributed databases. {\bf H.2.4}: Information
                 Systems, DATABASE MANAGEMENT, Systems, Distributed
                 systems. {\bf G.2.1}: Mathematics of Computing,
                 DISCRETE MATHEMATICS, Combinatorics, Combinatorial
                 algorithms.",
}

@Article{Gyssens:1986:CJD,
  author =       "Marc Gyssens",
  title =        "On the Complexity of Join Dependencies",
  journal =      j-TODS,
  volume =       "11",
  number =       "1",
  pages =        "81--108",
  month =        mar,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "87g:68011",
  MRreviewer =   "J. Paredaens",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-1/p81-gyssens/p81-gyssens.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-1/p81-gyssens/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5237.html",
  abstract =     "In [10] a method is proposed for decomposing join
                 dependencies (jds) in a relational database using the
                 notion of a hinge. This method was subsequently studied
                 in [11] and [12]. We show how the technique of
                 decomposition can be used to make integrity checking
                 more efficient. It turns out that it is important to
                 find a decomposition that minimizes the number of edges
                 of its largest element. We show that the decompositions
                 obtained with the method described in [10] are optimal
                 in this respect. This minimality criterion leads to the
                 definition of the {\em degree of cyclicity}, which
                 allows us to classify jds and leads to the notion of
                 {\em n-cyclicity}, of which acyclicity is a special
                 case for n = 2. We then show that, for a fixed value of
                 n (which may be greater than 2), integrity checking can
                 be performed in polynomial time provided we restrict
                 ourselves to {\em n-cyclic\/} jds. Finally, we
                 generalize a well-known characterization for acyclic
                 jds by proving that n-cyclicity is equivalent to
                 ``n-wise consistency implies global consistency.'' As a
                 consequence, consistency checking can be performed in
                 polynomial time if we restrict ourselves to n-cyclic
                 jds, for a fixed value of n, not necessarily equal to
                 2.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Antwerp, Antwerp, Belg",
  affiliationaddress = "Univ of Antwerp, Antwerp, Belg",
  classification = "723; 921",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "CYCLICITY; database systems; DECOMPOSITION; JOIN
                 DEPENDENCIES; MATHEMATICAL TECHNIQUES --- Graph Theory;
                 Relational",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Normal forms. {\bf G.2.2}: Mathematics
                 of Computing, DISCRETE MATHEMATICS, Graph Theory, Graph
                 algorithms. {\bf H.2.1}: Information Systems, DATABASE
                 MANAGEMENT, Logical Design, Schema and subschema. {\bf
                 G.2.2}: Mathematics of Computing, DISCRETE MATHEMATICS,
                 Graph Theory, Trees.",
}

@Article{Sacco:1986:FTE,
  author =       "Giovanni Maria Sacco",
  title =        "Fragmentation: a technique for Efficient Query
                 Processing",
  journal =      j-TODS,
  volume =       "11",
  number =       "2",
  pages =        "113--133",
  month =        jun,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: University of Torino, TR., Aug.
                 1983.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-2/p113-sacco/p113-sacco.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-2/p113-sacco/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5638.html",
  abstract =     "A `divide and conquer' strategy to compute natural
                 joins by sequential scans on unordered relations is
                 described. This strategy is shown to always be better
                 than merging scans when both relations must be sorted
                 before joining, and generally better in practical cases
                 when only the largest relation must be sorted.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ di Torino",
  affiliationaddress = "Turin, Italy",
  annote =       "Join by hashing: Create fragments by hashing, as many
                 fragments as buffers can be allocated in memory. Then
                 repeat that for the other relation. Then do a nested
                 unsorted join, as Kim, W. 1980, on the fragment
                 pairs.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Economics; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- Algorithms; database systems;
                 divide-and-conquer algorithms; economics;
                 fragmentation; natural joins, join hash partitioning
                 overflow avoidance recursion parallelism TODS,
                 algorithms; performance; query processing",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing.",
}

@Article{Beeri:1986:IAL,
  author =       "Catriel Beeri and Michael Kifer",
  title =        "An Integrated Approach to Logical Design of Relational
                 Database Schemes",
  journal =      j-TODS,
  volume =       "11",
  number =       "2",
  pages =        "134--158",
  month =        jun,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "848 633",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; Distributed/gesturing.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-2/p134-beeri/p134-beeri.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-2/p134-beeri/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/214291.html",
  abstract =     "We propose a new approach to the design of relational
                 database schemes. The main features of the approach are
                 the following:\par

                 A combination of the traditional decomposition and
                 synthesis approaches, thus allowing the use of both
                 functional and multivalued dependencies.
                 \par

                 Separation of structural dependencies relevant for the
                 design process from integrity constraints, that is,
                 constraints that do not bear any structural information
                 about the data and which should therefore be discarded
                 at the design stage. This separation is supported by a
                 simple syntactic test filtering out nonstructural
                 dependencies.\par

                 Automatic correction of schemes which lack certain
                 desirable properties.",
  acknowledgement = ack-nhfb,
  affiliation =  "Hebrew Univ of Jerusalem, Jerusalem, Isr",
  affiliationaddress = "Hebrew Univ of Jerusalem, Jerusalem, Isr",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "acyclic schemes, design; database systems;
                 decomposition; functional dependencies; multivalued
                 dependencies; relational; synthesis; theory",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema. {\bf H.2.1}:
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Normal forms.",
}

@Article{Mendelson:1986:IIC,
  author =       "Haim Mendelson and Aditya N. Saharia",
  title =        "Incomplete Information Costs and Database Design",
  journal =      j-TODS,
  volume =       "11",
  number =       "2",
  pages =        "159--185",
  month =        jun,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-2/p159-mendelson/p159-mendelson.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-2/p159-mendelson/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5678.html",
  abstract =     "This paper presents a methodology for trading-off the
                 cost of incomplete information against the data-related
                 costs in the design of database systems. It
                 investigates how the usage patterns of the database,
                 defined by the characteristics of information requests
                 presented to it, affect its conceptual design. The
                 construction of minimum-cost answers to information
                 requests for a variety of query types and cost
                 structures is also studied. The resulting costs of
                 incomplete database information are balanced against
                 the data-related costs in the derivation of the optimal
                 design.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Rochester, Rochester, NY, USA",
  affiliationaddress = "Univ of Rochester, Rochester, NY, USA",
  annote =       "information value, missing data, decision theory
                 framework, applied to ships in the Mediterranean.",
  classification = "723; 921",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Economics; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data-related costs, design; database systems; design;
                 economics; incomplete information costs; optimization;
                 theory",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design. {\bf H.1.1}: Information Systems,
                 MODELS AND PRINCIPLES, Systems and Information Theory.
                 {\bf H.3.3}: Information Systems, INFORMATION STORAGE
                 AND RETRIEVAL, Information Search and Retrieval.",
}

@Article{Ginsburg:1986:CTS,
  author =       "Seymour Ginsburg and Katsumi Tanaka",
  title =        "Computation-Tuple Sequences and Object Histories",
  journal =      j-TODS,
  volume =       "11",
  number =       "2",
  pages =        "186--212",
  month =        jun,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "848 634",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-2/p186-ginsburg/p186-ginsburg.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-2/p186-ginsburg/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5924.html",
  abstract =     "A record-based, algebraically-oriented model is
                 introduced for describing data for ``object histories''
                 (with computation), such as checking accounts, credit
                 card accounts, taxes, schedules, and so on. The model
                 consists of sequences of computation tuples defined by
                 a computation-tuple sequence scheme (CSS). The CSS has
                 three major features (in addition to input data):
                 computation (involving previous computation tuples),
                 ``uniform'' constraints (whose satisfaction by a
                 computation-tuple sequence $u$ implies satisfaction by
                 every interval of $u$ ), and specific sequences with
                 which to start the valid computation-tuple sequences. A
                 special type of CSS, called ``local,'' is singled out
                 for its relative simplicity in maintaining the validity
                 of a computation-tuple sequence. A necessary and
                 sufficient condition for a CSS to be equivalent to at
                 least one local CSS is given. Finally, the notion of
                 ``local bisimulatability'' is introduced for regarding
                 two CSS as conveying the same information, and two
                 results on local bisimulatability in connection with
                 local CSS are established.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Southern California, Los Angeles, CA, USA",
  affiliationaddress = "Univ of Southern California, Los Angeles, CA,
                 USA",
  annote =       "Sequential history tuples and objects with input,
                 computation, and result. Some constraints applied per
                 sequential entry cause satisfaction of global
                 constraints. Temporal issues are very specific.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Theory; Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computation history; data description; database state
                 transitions; database systems; theory; transaction
                 processing, algorithms; verification",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.1.0}: Information
                 Systems, MODELS AND PRINCIPLES, General. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Transaction processing.",
}

@Article{Garg:1986:OPK,
  author =       "Anil K. Garg and C. C. Gotlieb",
  title =        "Order-Preserving Key Transformations",
  journal =      j-TODS,
  volume =       "11",
  number =       "2",
  pages =        "213--234",
  month =        jun,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-2/p213-garg/p213-garg.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-2/p213-garg/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/5923.html",
  abstract =     "File organizations based on conventional hash
                 functions provide faster access to the stored records
                 in comparison with tree-like file structures. Tree
                 structures such as B$^{+}$-trees and ISAM do provide
                 for sequential processing, but require considerable
                 storage for the indices. When sequential processing is
                 needed a table that performs an order-preserving
                 transformation on keys can be used. $H$ is an
                 order-preserving key transform if $H(K_1) \geq
                 H(K_2)$, for all keys $K_1 > K_2$. We present
                 methodologies for constructing such
                 key transforms, and illustrate them for some real-life
                 key sets. Storage requirements for the table needed to
                 carry out the transformation are less than those needed
                 for the indices.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Toronto, Toronto, Ont, Can",
  affiliationaddress = "Univ of Toronto, Toronto, Ont, Can",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Management; Measurement;
                 Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "access methods, algorithms; data processing; database
                 systems; design; dynamic files; file organization; key
                 transformations; management; measurement;
                 order-preserving hashing; performance; theory",
  subject =      "{\bf E.5}: Data, FILES, Organization/structure. {\bf
                 H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods.",
}

@Article{Shapiro:1986:JPD,
  author =       "Leonard D. Shapiro",
  title =        "Join Processing in Database Systems with Large Main
                 Memories",
  journal =      j-TODS,
  volume =       "11",
  number =       "3",
  pages =        "239--264",
  month =        sep,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/database.bib; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-3/p239-shapiro/p239-shapiro.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-3/p239-shapiro/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/6315.html",
  abstract =     "We study algorithms for computing the equijoin of two
                 relations in a system with a standard architecture but
                 with large amounts of main memory. Our algorithms are
                 especially efficient when the main memory available is
                 a significant fraction of the size of one of the
                 relations to be joined; but they can be applied
                 whenever there is memory equal to approximately the
                 square root of the size of one relation. We present a
                 new algorithm which is a hybrid of two hash-based
                 algorithms and which dominates the other algorithms we
                 present, including sort-merge. Even in a virtual memory
                 environment, the hybrid algorithm dominates all the
                 others we study.\par

                 Finally, we describe how three popular tools to
                 increase the efficiency of joins, namely filters, Babb
                 arrays, and semijoins, can be grafted onto any of our
                 algorithms.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "design; memory query evaluation classical simple
                 hybrid hash joins TODS, algorithms; performance",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf H.2.0}: Information
                 Systems, DATABASE MANAGEMENT, General. {\bf H.2.6}:
                 Information Systems, DATABASE MANAGEMENT, Database
                 Machines. {\bf H.2.2}: Information Systems, DATABASE
                 MANAGEMENT, Physical Design.",
}

@Article{Gavish:1986:SQO,
  author =       "Bezalel Gavish and Arie Segev",
  title =        "Set Query Optimization in Distributed Database
                 Systems",
  journal =      j-TODS,
  volume =       "11",
  number =       "3",
  pages =        "265--293",
  month =        sep,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-3/p265-gavish/p265-gavish.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-3/p265-gavish/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/6488.html",
  abstract =     "This paper addresses the problem of optimizing queries
                 that involve set operations (set queries) in a
                 distributed relational database system. A particular
                 emphasis is put on the optimization of such queries in
                 horizontally partitioned database systems. A
                 mathematical programming model of the set query problem
                 is developed and its NP-completeness is proved.
                 Solution procedures are proposed and computational
                 results presented. One of the main results of the
                 computational experiments is that, for many queries,
                 the solution procedures are not sensitive to errors in
                 estimating the size of results of set operations.",
  acknowledgement = ack-nhfb,
  annote =       "The time complexity is NP-complete. Three
                 approximations.",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Languages; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; languages; theory",
  subject =      "{\bf C.2.4}: Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Distributed Systems,
                 Distributed databases. {\bf G.2.2}: Mathematics of
                 Computing, DISCRETE MATHEMATICS, Graph Theory, Trees.
                 {\bf H.2.3}: Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf H.2.4}: Information
                 Systems, DATABASE MANAGEMENT, Systems, Query
                 processing.",
}

@Article{Lafortune:1986:STM,
  author =       "St{\'e}phane Lafortune and Eugene Wong",
  title =        "A State Transition Model for Distributed Query
                 Processing",
  journal =      j-TODS,
  volume =       "11",
  number =       "3",
  pages =        "294--322",
  month =        sep,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/database.bib; Database/Graefe.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/des.bib; Misc/Discrete.event.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-3/p294-lafortune/p294-lafortune.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-3/p294-lafortune/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/6460.html",
  abstract =     "A state transition model for the optimization of query
                 processing in a distributed database system is
                 presented. The problem is parameterized by means of a
                 state describing the amount of processing that has been
                 performed at each site where the database is located. A
                 state transition occurs each time a new join or
                 semijoin is executed. Dynamic programming is used to
                 compute recursively the costs of the states and the
                 globally optimal solution, taking into account
                 communication and local processing costs. The state
                 transition model is general enough to account for the
                 possibility of parallel processing among the various
                 sites, as well as for redundancy in the database. The
                 model also permits significant reductions of the
                 necessary computations by taking advantage of simple
                 additivity and site-uniformity properties of a cost
                 model, and of clever strategies that improve on the
                 basic dynamic programming algorithm.",
  acknowledgement = ack-nhfb,
  bib =          "koz",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; design; performance; theory, Optimization
                 TODS",
  subject =      "{\bf C.2.4}: Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Distributed Systems,
                 Distributed databases. {\bf H.2.4}: Information
                 Systems, DATABASE MANAGEMENT, Systems, Distributed
                 systems. {\bf H.2.4}: Information Systems, DATABASE
                 MANAGEMENT, Systems, Query processing.",
}

@Article{Lozinskii:1986:POI,
  author =       "Eliezer L. Lozinskii",
  title =        "A Problem-Oriented Inferential Database System",
  journal =      j-TODS,
  volume =       "11",
  number =       "3",
  pages =        "323--356",
  month =        sep,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15 (68T20)",
  MRnumber =     "87k:68025",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-3/p323-lozinskii/p323-lozinskii.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-3/p323-lozinskii/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/6419.html",
  abstract =     "Recently developed inferential database systems face
                 some common problems: a very fast growth of search
                 space and difficulties in recognizing inference
                 termination (especially for recursive axioms). These
                 shortcomings stem mainly from the fact that the
                 inference process is usually separated from database
                 operations. A problem-oriented inferential system is
                 described which refers to the database prior to query
                 (or subquery) processing, so that the inference from
                 the very beginning is directed by data relevant to the
                 query. A multiprocessor implementation of the system is
                 presented based on a computer network conforming to
                 database relations and axioms. The system provides an
                 efficient indication of query termination, and is
                 complete in the sense that it produces all correct
                 answers to a query in a finite time.",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance; Theory;
                 Verification",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; design; performance; theory;
                 verification",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf I.2.3}: Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Deduction. {\bf C.1.3}: Computer
                 Systems Organization, PROCESSOR ARCHITECTURES, Other
                 Architecture Styles, Data-flow architectures.",
}

@Article{Osborn:1986:DRD,
  author =       "Sylvia L. Osborn and T. E. Heaven",
  title =        "The Design of a Relational Database System with
                 Abstract Data Types for Domains",
  journal =      j-TODS,
  volume =       "11",
  number =       "3",
  pages =        "357--373",
  month =        sep,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-3/p357-osborn/p357-osborn.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-3/p357-osborn/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/6461.html",
  abstract =     "An extension to the relational model is described in
                 which domains can be arbitrarily defined as abstract
                 data types. Operations on these data types include
                 primitive operations, aggregates, and transformations.
                 It is shown that these operations make the query
                 language complete in the sense of Chandra and Harel.
                 The system has been designed in such a way that new
                 data types and their operations can be defined with a
                 minimal amount of interaction with the database
                 management system.",
  acknowledgement = ack-nhfb,
  annote =       "Operations on simple objects, operations on aggregates
                 and `transformations' can be defined on relations. It
                 is possible to implement a transitive closure RAD uses
                 the data dictionary. ---Ong, Fogg and Stonebraker.",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Languages; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "design; languages; theory",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf D.3.3}: Software,
                 PROGRAMMING LANGUAGES, Language Constructs and
                 Features, Abstract data types. {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages.",
}

@Article{Gawlick:1986:RIW,
  author =       "Dieter Gawlick",
  title =        "Report on the International Workshop on
                 High-Performance Transaction Systems",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "375--377",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-4/p375-gawlick/p375-gawlick.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-4/p375-gawlick/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/17346.html",
  acknowledgement = ack-nhfb,
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "design; performance",
  subject =      "{\bf A.0}: General Literature, GENERAL. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Transaction processing. {\bf C.4}: Computer Systems
                 Organization, PERFORMANCE OF SYSTEMS, Reliability,
                 availability, and serviceability.",
}

@Article{Mohan:1986:TMR,
  author =       "C. Mohan and B. Lindsay and R. Obermarck",
  title =        "Transaction Management in the {R*} Distributed
                 Database Management System",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "378--396",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-4/p378-mohan/p378-mohan.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-4/p378-mohan/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/7266.html",
  abstract =     "This paper deals with the transaction management
                 aspects of the R* distributed database system. It
                 concentrates primarily on the description of the R*
                 commit protocols, Presumed Abort (PA) and Presumed
                 Commit (PC). PA and PC are extensions of the
                 well-known, two-phase (2P) commit protocol. PA is
                 optimized for read-only transactions and a class of
                 multisite update transactions, and PC is optimized for
                 other classes of multisite update transactions. The
                 optimizations result in reduced intersite message
                 traffic and log writes, and, consequently, a better
                 response time. The paper also discusses R*'s approach
                 toward distributed deadlock detection and resolution.",
  acknowledgement = ack-nhfb,
  affiliation =  "IBM, San Jose, CA, USA",
  affiliationaddress = "IBM, San Jose, CA, USA",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Reliability",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "commit protocols; concurrency control, RSTAR TODS,
                 algorithms; database systems; deadlock victim
                 selection; design; distributed; optimization;
                 reliability; transaction management",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Distributed systems. {\bf C.2.4}: Computer Systems
                 Organization, COMPUTER-COMMUNICATION NETWORKS,
                 Distributed Systems, Distributed databases. {\bf
                 D.4.1}: Software, OPERATING SYSTEMS, Process
                 Management, Concurrency. {\bf D.4.1}: Software,
                 OPERATING SYSTEMS, Process Management, Deadlocks. {\bf
                 D.4.1}: Software, OPERATING SYSTEMS, Process
                 Management, Synchronization. {\bf D.4.5}: Software,
                 OPERATING SYSTEMS, Reliability, Fault-tolerance. {\bf
                 D.4.7}: Software, OPERATING SYSTEMS, Organization and
                 Design, Distributed systems. {\bf H.2.2}: Information
                 Systems, DATABASE MANAGEMENT, Physical Design, Recovery
                 and restart. {\bf H.2.7}: Information Systems, DATABASE
                 MANAGEMENT, Database Administration, Logging and
                 recovery.",
}

@Article{Bayer:1986:CTR,
  author =       "Rudolf Bayer",
  title =        "Consistency of Transactions and Random Batch",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "397--404",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-4/p397-bayer/p397-bayer.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-4/p397-bayer/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/214287.html",
  abstract =     "A synchronization technique and scheduling strategy is
                 described, which allows us to run a batch process
                 simultaneously with on-line transactions. The batch
                 process and the transactions are serialized in such a
                 way that consistency level 3 is achieved.",
  acknowledgement = ack-nhfb,
  affiliation =  "Technische Univ Muenchen, West Ger",
  affiliationaddress = "Technische Univ Muenchen, West Ger",
  classification = "723; 913",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "concurrency control, algorithms; consistency of
                 transactions; database systems; design; performance;
                 random batch; scheduling; synchronization",
  subject =      "{\bf D.4.1}: Software, OPERATING SYSTEMS, Process
                 Management. {\bf D.4.7}: Software, OPERATING SYSTEMS,
                 Organization and Design. {\bf E.5}: Data, FILES. {\bf
                 H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design. {\bf H.2.4}: Information Systems,
                 DATABASE MANAGEMENT, Systems.",
}

@Article{ONeil:1986:ETM,
  author =       "Patrick E. O'Neil",
  title =        "The {Escrow} Transactional Method",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "405--430",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-4/p405-o_neil/p405-o_neil.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-4/p405-o_neil/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/7265.html",
  abstract =     "A method is presented for permitting record updates by
                 long-lived transactions without forbidding simultaneous
                 access by other users to records modified. Earlier
                 methods presented separately by Gawlick and Reuter are
                 comparable but concentrate on ``hot-spot'' situations,
                 where even short transactions cannot lock frequently
                 accessed fields without causing bottlenecks. The Escrow
                 Method offered here is designed to support nonblocking
                 record updates by transactions that are ``long lived''
                 and thus require long periods to complete.
                 Recoverability of intermediate results prior to commit
                 thus becomes a design goal, so that updates as of a
                 given time can be guaranteed against memory or media
                 failure while still retaining the prerogative to abort.
                 This guarantee basically completes phase one of a
                 two-phase commit, and several advantages result: (1) As
                 with Gawlick's and Reuter's methods, high-concurrency
                 items in the database will not act as a bottleneck; (2)
                 transaction commit of different updates can be
                 performed asynchronously, allowing natural distributed
                 transactions; indeed, distributed transactions in the
                 presence of delayed messages or occasional line
                 disconnection become feasible in a way that we argue
                 will tie up minimal resources for the purpose intended;
                 and (3) it becomes natural to allow for human
                 interaction in the middle of a transaction without loss
                 of concurrent access or any special difficulty for the
                 application programmer. The Escrow Method, like
                 Gawlick's Fast Path and Reuter's Method, requires the
                 database system to be an ``expert'' about the type of
                 transactional updates performed, most commonly updates
                 involving incremental changes to aggregate quantities.
                 However, the Escrow Method is extendable to other types
                 of updates.",
  acknowledgement = ack-nhfb,
  affiliation =  "Computer Corp of America, Cambridge, MA, USA",
  affiliationaddress = "Computer Corp of America, Cambridge, MA, USA",
  annote =       "For aggregate values (counts, sum) concurrency control
                 can use soft tolerances and keep them in escrow",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database systems; design; escrow transactional method;
                 long-lived transactions; multiuser environment, locking
                 quantities, not variables TODS, algorithms; nested
                 transactions; performance; theory; two-phase commit",
  subject =      "{\bf D.4.1}: Software, OPERATING SYSTEMS, Process
                 Management, Concurrency. {\bf D.4.1}: Software,
                 OPERATING SYSTEMS, Process Management, Deadlocks. {\bf
                 H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Recovery and restart. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Distributed systems. {\bf H.2.4}: Information Systems,
                 DATABASE MANAGEMENT, Systems, Escrow. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Transaction processing.",
}

@Article{Hsu:1986:PTP,
  author =       "Meichun Hsu and Arvola Chan",
  title =        "Partitioned Two-Phase Locking",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "431--446",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-4/p431-hsu/p431-hsu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-4/p431-hsu/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/7477.html",
  abstract =     "In a large integrated database, there often exists an
                 ``information hierarchy,'' where both raw data and
                 derived data are stored and used together. Therefore,
                 among update transactions, there will often be some
                 that perform only read accesses from a certain (i.e.,
                 the ``raw'' data) portion of the database and write
                 into another (i.e., the ``derived'' data) portion. A
                 conventional concurrency control algorithm would have
                 treated such transactions as regular update
                 transactions and subjected them to the usual protocols
                 for synchronizing update transactions. In this paper
                 such transactions are examined more closely. The
                 purpose is to devise concurrency control methods that
                 allow the computation of derived information to proceed
                 without interfering with the updating of raw data.
                 \par

                 The first part of the paper presents a proof method for
                 correctness of concurrency control algorithms in a
                 hierarchically decomposed database. The proof method
                 provides a framework for understanding the intricacies
                 in dealing with hierarchically decomposed databases.
                 The second part of the paper is an application of the
                 proof method to show the correctness of a
                 two-phase-locking-based algorithm, called partitioned
                 two-phase locking, for hierarchically decomposed
                 databases. This algorithm is a natural extension to the
                 Version Pool method proposed previously in the
                 literature.",
  acknowledgement = ack-nhfb,
  affiliation =  "Harvard Univ, Cambridge, MA, USA",
  affiliationaddress = "Harvard Univ, Cambridge, MA, USA",
  annote =       "revisions also for update",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- algorithms; concurrency
                 control; database systems; theory; transaction
                 processing, algorithms; two-phase locking",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems. {\bf H.2.2}: Information Systems, DATABASE
                 MANAGEMENT, Physical Design.",
}

@Article{Luk:1986:EEL,
  author =       "W. S. Luk and Steve Kloster",
  title =        "{ELFS}: {English} Language from {SQL}",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "447--472",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sun Dec 8 08:54:10 MST 1996",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  abstract =     "In this paper we describe a system which, given a
                 query in SQL-like relational database language, will
                 display its meaning in clear, unambiguous natural
                 language. The syntax-driven translation mechanism is
                 independent of the application domain. It has direct
                 applications in designing computer-based SQL tutorial
                 systems and program debugging systems. The research
                 results obtained in the paper will also be useful in
                 query optimization and design of a more user-friendly
                 language front-end for casual users.",
  acknowledgement = ack-nhfb,
  affiliation =  "Simon Fraser Univ, Burnaby, BC, Can",
  affiliationaddress = "Simon Fraser Univ, Burnaby, BC, Can",
  annote =       "display meaning in natural language is independent of
                 the application domain.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming --- Program Debugging; database
                 systems; ELFS; Query Languages; relational database
                 language; SQL",
}

@Article{Sacco:1986:BMR,
  author =       "Giovanni Maria Sacco and Mario Schkolnick",
  title =        "Buffer Management in Relational Database Systems",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "473--498",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-4/p473-sacco/p473-sacco.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-4/p473-sacco/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/7336.html",
  abstract =     "The hot-set model, characterizing the buffer
                 requirements of relational queries, is presented. This
                 model allows the system to determine the optimal buffer
                 space to be allocated to a query; it can also be used
                 by the query optimizer to derive efficient execution
                 plans accounting for the available buffer space, and by
                 a query scheduler to prevent thrashing. The hot-set
                 model is compared with the working-set model. A
                 simulation study is presented.",
  acknowledgement = ack-nhfb,
  acmcr =        "8708-0695",
  affiliation =  "Univ di Torino",
  affiliationaddress = "Turin, Italy",
  annote =       "The hot-set model provides a more meaningful measure
                 of cost than simple I/O counts.",
  classification = "723; 913",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "buffer management; database systems; performance;
                 query optimizer, algorithms; query processing;
                 relational; scheduling; theory",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf H.2.2}: Information
                 Systems, DATABASE MANAGEMENT, Physical Design. {\bf
                 D.4.2}: Software, OPERATING SYSTEMS, Storage
                 Management.",
}

@Article{Ariav:1986:TOD,
  author =       "Gad Ariav",
  title =        "A Temporally Oriented Data Model",
  journal =      j-TODS,
  volume =       "11",
  number =       "4",
  pages =        "499--527",
  month =        dec,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1986-11-4/p499-ariav/p499-ariav.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1986-11-4/p499-ariav/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/7350.html",
  abstract =     "The research into time and data models has so far
                 focused on the identification of extensions to the
                 classical relational model that would provide it with
                 ``adequate'' semantic capacity to deal with time. The
                 temporally oriented data model (TODM) presented in this
                 paper is a result of a different approach, namely, it
                 directly operationalizes the pervasive
                 three-dimensional metaphor for time. One of the main
                 results is thus the development of the notion of the
                 data cube: a three-dimensional and inherently temporal
                 data construct where time, objects, and attributes are
                 the primary dimensions of stored data. TODM's cube adds
                 historical depth to the tabular notions of data and
                 provides a framework for storing and retrieving data
                 within their temporal context. The basic operations in
                 the model allow the formation of new cubic views from
                 existing ones, or viewing data as one moves up and down
                 in time within cubes.\par

                 This paper introduces TODM, a consistent set of
                 temporally oriented data constructs, operations, and
                 constraints, and then presents TOSQL, a corresponding
                 end-user's SQL-like query syntax. The model is a
                 restricted but consistent superset of the relational
                 model, and the query syntax incorporates temporal
                 notions in a manner that likewise avoids penalizing
                 users who are interested solely in the current view of
                 data (rather than in a temporal perspective). The
                 naturalness of the spatial reference to time and the
                 added semantic capacity of TODM come with a price--the
                 definitions of the cubic constructs and basic
                 operations are relatively cumbersome. As rudimentary as
                 it is, TODM nonetheless provides a comprehensive basis
                 for formulating an external data model for a temporally
                 oriented database.",
  acknowledgement = ack-nhfb,
  affiliation =  "New York Univ, New York, NY, USA",
  affiliationaddress = "New York Univ, New York, NY, USA",
  annote =       "at least one timestamp, time of record, plus other
                 temporal --- event stamps.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Languages; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data cube; database systems; information modeling;
                 languages; relational; temporally oriented data model;
                 theory; TODM, design; TOSQL",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Data
                 manipulation languages (DML). {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Query
                 languages.",
}

@Article{Albano:1986:OSG,
  author =       "Antonio Albano and S. Alfo and Luca Cardelli and Renzo
                 Orsini",
  title =        "An Overview of {SIDEREUS}: a Graphical Database Schema
                 Editor for {Galileo}",
  journal =      j-TODS,
  volume =       "11",
  number =       "??",
  pages =        "568--571",
  month =        "????",
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 12:51:20 1996",
  bibsource =    "Distributed/gesturing.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  xxnote =       "This paper does not seem to be published in TODS.",
}

@Article{Durand:1986:FMS,
  author =       "Charles Durand",
  title =        "Forward Multidimensional Search with Applications to
                 Information Retrieval",
  journal =      j-TODS,
  volume =       "??",
  number =       "??",
  pages =        "??--??",
  month =        sep,
  year =         "1986",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Tue Dec 10 12:51:25 1996",
  bibsource =    "Database/Wiederhold.bib;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Submitted.",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  xxnote =       "This paper does not seem to be published in TODS.",
}

@Article{Spyratos:1987:PMD,
  author =       "Nicolas Spyratos",
  title =        "The partition model: a deductive database model",
  journal =      j-TODS,
  volume =       "12",
  number =       "1",
  pages =        "1--37",
  month =        mar,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: Institut National de
                 Recherche en Informatique et en Automatique (INRIA),
                 TR-286, Apr. 1983.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-1/p1-spyratos/p1-spyratos.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-1/p1-spyratos/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/22718.html",
  abstract =     "We present a new database model in which each
                 attribute is modeled by a family of disjoint subsets of
                 an underlying population of objects. Such a family is
                 called a partitioning, and the set of all partitionings
                 is turned into a lattice by appropriately defining
                 product and sum. A database is seen as a function from
                 a sublattice into the lattice of partitionings. The
                 model combines the following features:\par

                 (1) syntactic simplicity (essentially that of the
                 relational model),\par

                 (2) powerful means for the specification of semantic
                 information (in the form of lattice equations), and
                 \par

                 (3) deductive capability (essentially that of set
                 theory).\par

                 The relational model of data and the basic constructs
                 of semantic modeling can be embedded into our model in
                 a simple and straightforward manner.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ de Paris-Sud, Orsay, Fr",
  affiliationaddress = "Univ de Paris-Sud, Orsay, Fr",
  annote =       "Type hierarchies and lattices.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database semantics; database systems; deductive
                 database model; partition model; theory",
  subject =      "{\bf F.3.2}: Theory of Computation, LOGICS AND
                 MEANINGS OF PROGRAMS, Semantics of Programming
                 Languages, Algebraic approaches to semantics. {\bf
                 H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models.",
}

@Article{Wu:1987:ASM,
  author =       "C. T. Wu and Walter A. Burkhard",
  title =        "Associative Searching in Multiple Storage Units",
  journal =      j-TODS,
  volume =       "12",
  number =       "1",
  pages =        "38--64",
  month =        mar,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Discusses interpolation hashing, a multidimensional
                 variant of linear hashing.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-1/p38-wu/p38-wu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-1/p38-wu/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/12048.html",
  abstract =     "A file maintenance model, called the multiple random
                 access storage units model, is introduced. Storage
                 units can be accessed simultaneously, and the parallel
                 processing of an associative query is achieved by
                 distributing data evenly among the storage units.
                 Maximum parallelism is obtained when data satisfying an
                 associative query are evenly distributed for every
                 possible query. An allocation scheme called $M$-cycle
                 allocation is proposed to maintain large files of data
                 on multiple random access storage units. The allocation
                 scheme provides an efficient and straightforward
                 indexing over multidimensional key spaces and supports
                 the parallel processing of orthogonal range queries.
                 Our analysis shows that $M$-cycle allocation achieves
                 the near-optimum parallelism for processing the
                 orthogonal range queries. Moreover, there is no
                 duplication of records and no increase in
                 insertion\slash deletion cost.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of California, San Diego, CA, USA",
  affiliationaddress = "Univ of California, San Diego, CA, USA",
  classification = "723; 903",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "associative searching; data processing --- file
                 organization; database systems; design; file
                 maintenance model; information science --- information
                 retrieval; multiple storage units; performance; random
                 access, algorithms; theory",
  subject =      "{\bf E.5}: Data, FILES. {\bf E.1}: Data, DATA
                 STRUCTURES. {\bf H.2.2}: Information Systems, DATABASE
                 MANAGEMENT, Physical Design.",
}

@Article{Lomet:1987:PEF,
  author =       "David B. Lomet",
  title =        "Partial Expansions for File Organizations with an
                 Index",
  journal =      j-TODS,
  volume =       "12",
  number =       "1",
  pages =        "65--84",
  month =        mar,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-1/p65-lomet/p65-lomet.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-1/p65-lomet/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/12049.html",
  abstract =     "A new way to increase file space in dynamically
                 growing files is introduced in which substantial
                 improvement in file utilization can be achieved. It
                 makes use of partial expansions in which, instead of
                 doubling the space associated with some part of the
                 file, the space grows at a slower rate. Unlike previous
                 versions of partial expansion in which the number of
                 buckets involved in file growth is increased by less
                 than a factor of two, the new method expands file space
                 by increasing bucket size via `elastic buckets'. This
                 permits partial expansions to be used with a wide range
                 of indexed files, including B-trees. The results of
                 using partial expansions are analyzed, and the analysis
                 confirmed by a simulation study. The analysis and
                 simulation demonstrate that the file utilization gains
                 are substantial and that fears of excessive insertion
                 cost resulting from more frequent file growth are
                 unfounded.",
  acknowledgement = ack-nhfb,
  affiliation =  "Wang Inst of Graduate Studies, Tyngboro, MA, USA",
  affiliationaddress = "Wang Inst of Graduate Studies, Tyngboro, MA,
                 USA",
  annote =       "a way to increase file space with substantial
                 improvement in file utilization elastic buckets come in
                 a number of sizes.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; database systems; File Organization;
                 FILE UTILIZATION; INSERTION COST; PARTIAL EXPANSIONS",
  subject =      "{\bf D.4.3}: Software, OPERATING SYSTEMS, File Systems
                 Management, Access methods. {\bf D.4.3}: Software,
                 OPERATING SYSTEMS, File Systems Management, File
                 organization. {\bf H.2.2}: Information Systems,
                 DATABASE MANAGEMENT, Physical Design, Access methods.
                 {\bf H.3.2}: Information Systems, INFORMATION STORAGE
                 AND RETRIEVAL, Information Storage, File
                 organization.",
}

@Article{Fedorowicz:1987:DPE,
  author =       "Jane Fedorowicz",
  title =        "Database Performance Evaluation in an Indexed File
                 Environment",
  journal =      j-TODS,
  volume =       "12",
  number =       "1",
  pages =        "85--110",
  month =        mar,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-1/p85-fedorowicz/p85-fedorowicz.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-1/p85-fedorowicz/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/13675.html",
  abstract =     "The use of database systems for managerial decision
                 making often incorporates information-retrieval
                 capabilities with numeric report generation. Of great
                 concern to the user of such a system is the response
                 time associated with issuing a query to the database.
                 This study presents a procedure for estimating response
                 time for one of the most frequently encountered
                 physical storage mechanisms, the indexed file. The
                 model provides a fairly high degree of accuracy, but is
                 simple enough so that the cost of applying the model is
                 not exorbitant. The model incorporates the knowledge
                 that the distribution of access key occurrences is
                 known to follow Zipf's law. It first estimates the
                 access time required to complete the query, which
                 includes the time needed for all input and output
                 transactions, and CPU time used in performing the
                 search. The effects of multiple users on an
                 individual's response time are then assessed using a
                 simple regression estimation technique. The two-step
                 procedure allows for the separation of access time from
                 multiuser influences.",
  acknowledgement = ack-nhfb,
  affiliation =  "Boston Univ, Boston, MA, USA",
  affiliationaddress = "Boston Univ, Boston, MA, USA",
  annote =       "a procedure for estimating response time; distribution
                 of access key occurrences follow Zipf's law. Early
                 version with Kellogg, J. L. Model provides a fairly
                 high degree of accuracy but is simple. The effects of
                 multiple users are assessed using simple regression
                 estimation.",
  classification = "723; 912; 922",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing --- File organization; database
                 performance; database systems; indexed file
                 environment; MANAGEMENT --- Information Systems;
                 multiple users, design; Performance; performance;
                 response time; statistical methods --- regression
                 analysis; Zipf's law",
  subject =      "{\bf D.4.3}: Software, OPERATING SYSTEMS, File Systems
                 Management, File organization. {\bf H.2.2}: Information
                 Systems, DATABASE MANAGEMENT, Physical Design, Access
                 methods. {\bf H.3.2}: Information Systems, INFORMATION
                 STORAGE AND RETRIEVAL, Information Storage, File
                 organization. {\bf H.3.3}: Information Systems,
                 INFORMATION STORAGE AND RETRIEVAL, Information Search
                 and Retrieval, Retrieval models.",
}

@Article{Ozsoyoglu:1987:NNF,
  author =       "Z. Meral {\"O}zsoyo{\u{g}}lu and Li-Yan Yuan",
  title =        "A New Normal Form for Nested Relations",
  journal =      j-TODS,
  volume =       "12",
  number =       "1",
  pages =        "111--136",
  month =        mar,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "886 100",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-1/p111-ozsoyoglu/p111-ozsoyoglu.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-1/p111-ozsoyoglu/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/13676.html",
  abstract =     "We consider nested relations whose schemes are
                 structured as trees, called scheme trees, and introduce
                 a normal form for such relations, called the nested
                 normal form. Given a set of attributes $U$, and a set
                 of multivalued dependencies (MVDs) $M$ over these
                 attributes, we present an algorithm to obtain a nested
                 normal form decomposition of $U$ with respect to $M$.
                 Such a decomposition has several desirable properties,
                 such as explicitly representing a set of full and
                 embedded MVDs implied by $M$, and being a faithful and
                 nonredundant representation of $U$. Moreover, if the
                 given set of MVDs is conflict-free, then the nested
                 normal form decomposition is also
                 dependency-preserving. Finally, we show that if $M$ is
                 conflict-free, then the set of root-to-leaf paths of
                 scheme trees in nested normal form decomposition is
                 precisely the unique 4NF decomposition $ [9, 16] $ of
                 $U$ with respect to $M$.",
  acknowledgement = ack-nhfb,
  affiliation =  "Case Western Reserve Univ, Cleveland, OH, USA",
  affiliationaddress = "Case Western Reserve Univ, Cleveland, OH, USA",
  annote =       "non-first normal form.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; data structures; database systems ---
                 design; decomposition, algorithms; design; multivalued
                 dependency; nested relations; normal form; theory",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.1}: Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Normal
                 forms.",
}

@Article{Christodoulakis:1987:ARP,
  author =       "Stavros Christodoulakis",
  title =        "Analysis of Retrieval Performance for Records and
                 Objects using Optical Disk Technology",
  journal =      j-TODS,
  volume =       "12",
  number =       "2",
  pages =        "137--169",
  month =        jun,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-2/p137-christodoulakis/p137-christodoulakis.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-2/p137-christodoulakis/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/23015.html",
  abstract =     "In this paper we examine the problem of object and
                 record retrieval from optical disks. General objects
                 (such as images, documents, etc.) may be long and their
                 length may have high variance. We assume that all the
                 components of an object are stored consecutively in
                 storage to speed-up retrieval performance. We first
                 present an optical disk model and an optimal schedule
                 for retrieval of records and objects which qualify in a
                 single query on a file stored on an optical disk
                 device. We then provide {\em exact\/} and {\em
                 approximate\/} analytic results for evaluating the
                 retrieval performance for objects from an optical disk.
                 The analysis provides some basic analytic tools for
                 studying the performance of various file and database
                 organizations for optical disks. The results involve
                 probability distribution of block accesses, probability
                 distributions of span accesses, and probability
                 distribution of seek times. Record retrieval is an
                 important special case. This analysis differs from
                 similar ones in database environments in the following
                 respects: (1) the large size and large variance of the
                 size of objects; (2) crossing of track boundaries by
                 objects; (3) the capability for span access that
                 optical disks provide (e.g., when the optical assembly
                 is located in a given position, information can be read
                 from a number of consecutive tracks (span) with a small
                 additional cost).",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Waterloo, Waterloo, Ont, Can",
  affiliationaddress = "Univ of Waterloo, Waterloo, Ont, Can",
  classification = "723; 741; 903",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Measurement; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data storage, optical --- storage devices; information
                 retrieval; information science; optical disk
                 technology, measurement; performance; retrieval
                 performance; theory",
  subject =      "{\bf C.4}: Computer Systems Organization, PERFORMANCE
                 OF SYSTEMS, Modeling techniques. {\bf D.4.3}: Software,
                 OPERATING SYSTEMS, File Systems Management, File
                 organization.",
}

@Article{Herlihy:1987:DQA,
  author =       "Maurice Herlihy",
  title =        "Dynamic Quorum Adjustment for Partitioned Data",
  journal =      j-TODS,
  volume =       "12",
  number =       "2",
  pages =        "170--194",
  month =        jun,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-2/p170-herlihy/p170-herlihy.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-2/p170-herlihy/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/22953.html",
  abstract =     "A partition occurs when functioning sites in a
                 distributed system are unable to communicate. This
                 paper introduces a new method for managing replicated
                 data objects in the presence of partitions. Each
                 operation provided by a replicated object has a set of
                 quorums, which are sets of sites whose cooperation
                 suffices to execute the operation. The method permits
                 an object's quorums to be adjusted dynamically in
                 response to failures and recoveries. A transaction that
                 is unable to progress using one set of quorums may
                 switch to another, more favorable set, and transactions
                 in different partitions may progress using different
                 sets. This method has three novel aspects: (1) it
                 supports a wider range of quorums than earlier
                 proposals, (2) it scales up effectively to large
                 systems because quorum adjustments do not require
                 global reconfiguration, and (3) it systematically
                 exploits the semantics of typed objects to support more
                 flexible quorum adjustment.",
  acknowledgement = ack-nhfb,
  affiliation =  "Carnegie-Mellon Univ, Pittsburgh, PA, USA",
  affiliationaddress = "Carnegie-Mellon Univ, Pittsburgh, PA, USA",
  annote =       "Each operation provided by a replicated object has a
                 set of quorums, sites whose cooperation suffices to
                 execute the operation.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Languages; Reliability",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer systems, digital --- distributed; database
                 systems; distributed; dynamic quorum adjustment;
                 languages; partitioned data, algorithms; reliability",
  subject =      "{\bf D.3.3}: Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Abstract data types. {\bf
                 D.4.3}: Software, OPERATING SYSTEMS, File Systems
                 Management, Distributed file systems. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Distributed systems. {\bf D.3.3}: Software, PROGRAMMING
                 LANGUAGES, Language Constructs and Features, Data types
                 and structures. {\bf D.4.5}: Software, OPERATING
                 SYSTEMS, Reliability, Fault-tolerance. {\bf H.2.4}:
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Transaction processing.",
}

@Article{Ellis:1987:CLH,
  author =       "Carla Schlatter Ellis",
  title =        "Concurrency in Linear Hashing",
  journal =      j-TODS,
  volume =       "12",
  number =       "2",
  pages =        "195--217",
  month =        jun,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in \cite{ACM:1985:PFA}.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-2/p195-ellis/p195-ellis.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-2/p195-ellis/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/22954.html",
  abstract =     "Concurrent access to complex shared data structures,
                 particularly structures useful as database indices, has
                 long been of interest in the database community. In
                 dynamic databases, tree structures such as B-trees have
                 been used as indices because of their ability to handle
                 growth; whereas hashing has been used for fast access
                 in relatively static databases. Recently, a number of
                 techniques for dynamic hashing have appeared. They
                 address the major deficiency of traditional hashing
                 when applied to databases that experience significant
                 change in the amount of data being stored. This paper
                 presents a solution that allows concurrency in one of
                 these dynamic hashing data structures, namely linear
                 hash files. The solution is based on locking protocols
                 and minor modifications in the data structures.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Rochester, USA",
  affiliationaddress = "Univ of Rochester, USA",
  annote =       "Searching can proceed in parallel with splits. Also
                 discusses distributed access.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "complex shared data structures, algorithms; concurrent
                 access; data processing; Data Structures; database
                 systems; linear hashing",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Concurrency. {\bf D.4.1}: Software, OPERATING
                 SYSTEMS, Process Management,
                 Multiprocessing/multiprogramming. {\bf E.1}: Data, DATA
                 STRUCTURES. {\bf H.2.2}: Information Systems, DATABASE
                 MANAGEMENT, Physical Design. {\bf H.3.2}: Information
                 Systems, INFORMATION STORAGE AND RETRIEVAL, Information
                 Storage, File organization.",
}

@Article{Valduriez:1987:JI,
  author =       "Patrick Valduriez",
  title =        "Join Indices",
  journal =      j-TODS,
  volume =       "12",
  number =       "2",
  pages =        "218--246",
  month =        jun,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/database.bib;
                 Database/Graefe.bib; Database/Wiederhold.bib;
                 http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Compares join indices with inverted indices, clustered
                 indices, B+ trees, linked lists, and hybrid hash
                 techniques.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-2/p218-valduriez/p218-valduriez.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-2/p218-valduriez/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/22955.html",
  abstract =     "In new application areas of relational database
                 systems, such as artificial intelligence, the join
                 operator is used more extensively than in conventional
                 applications. In this paper, we propose a simple data
                 structure, called a join index, for improving the
                 performance of joins in the context of complex queries.
                 For most of the joins, updates to join indices incur
                 very little overhead. Some properties of a join index
                 are (i) its efficient use of memory and adaptiveness to
                 parallel execution, (ii) its compatibility with other
                 operations (including select and union), (iii) its
                 support for abstract data type join predicates, (iv)
                 its support for multirelation clustering, and (v) its
                 use in representing directed graphs and in evaluating
                 recursive queries. Finally, the analysis of the join
                 algorithm using join indices shows its excellent
                 performance.",
  acknowledgement = ack-nhfb,
  affiliation =  "Microelectronics \& Computer Technology Corp, Austin,
                 TX, USA",
  affiliationaddress = "Microelectronics \& Computer Technology Corp,
                 Austin, TX, USA",
  annote =       "arrays of combined indices are maintained to
                 precompute joins among tuples. The technique is very
                 similar to that implemented as ADABAS correlators.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "abstract data types; computer programming ---
                 Algorithms; data processing --- Data Structures;
                 database systems; design; join algorithm, including
                 semi-join join index with rid list from selection index
                 TODS, algorithms; JOIN index; multirelation clustering;
                 performance; Relational",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf E.1}: Data, DATA
                 STRUCTURES, Trees. {\bf E.5}: Data, FILES,
                 Organization/structure. {\bf H.2.2}: Information
                 Systems, DATABASE MANAGEMENT, Physical Design, Access
                 methods. {\bf H.2.4}: Information Systems, DATABASE
                 MANAGEMENT, Systems, Query processing. {\bf H.3.1}:
                 Information Systems, INFORMATION STORAGE AND RETRIEVAL,
                 Content Analysis and Indexing, Indexing methods.",
}

@Article{Snodgrass:1987:TQL,
  author =       "Richard Snodgrass",
  title =        "The {Temporal Query Language TQUEL}",
  journal =      j-TODS,
  volume =       "12",
  number =       "2",
  pages =        "247--298",
  month =        jun,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib;
                 Misc/is.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-2/p247-snodgrass/p247-snodgrass.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-2/p247-snodgrass/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/22956.html",
  abstract =     "Recently, attention has been focused on {\em temporal
                 databases}, representing an enterprise over time. We
                 have developed a new language, {\em TQuel}, to query a
                 temporal database. TQuel was designed to be a minimal
                 extension, both syntactically and semantically, of
                 Quel, the query language in the Ingres relational
                 database management system. This paper discusses the
                 language informally, then provides a tuple relational
                 calculus semantics for the TQuel statements that differ
                 from their Quel counterparts, including the
                 modification statements. The three additional temporal
                 constructs defined in TQuel are shown to be direct
                 semantic analogues of Quel's where clause and target
                 list. We also discuss reducibility of the semantics to
                 Quel's semantics when applied to a static database.
                 TQuel is compared with ten other query languages
                 supporting time.",
  acmcrnumber =  "8712-1006",
  affiliation =  "Univ of North Carolina, Chapel Hill, NC, USA",
  affiliationaddress = "Univ of North Carolina, Chapel Hill, NC, USA",
  annote =       "Describes extensions to Quel to handle temporal
                 queries. Three kinds of temporal information are
                 handled: `Transaction time', when information was
                 stored in the database, `valid time' when the stored
                 info models reality, and `user-defined time' explicitly
                 stored by user in the database.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Languages; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer programming languages; database systems;
                 relational calculus; temporal databases; temporal query
                 language; theory; TQUEL; tuple calculus, languages",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.7}: Information
                 Systems, DATABASE MANAGEMENT, Database Administration,
                 Logging and recovery. {\bf H.2.3}: Information Systems,
                 DATABASE MANAGEMENT, Languages, Query languages. {\bf
                 H.2.3}: Information Systems, DATABASE MANAGEMENT,
                 Languages, TQUEL.",
}

@Article{Wong:1987:MIR,
  author =       "S. K. M. Wong and W. Ziarko and V. V. Raghavan and P.
                 C. N. Wong",
  title =        "On Modeling of Information Retrieval Concepts in
                 Vector Spaces",
  journal =      j-TODS,
  volume =       "12",
  number =       "2",
  pages =        "299--321",
  month =        jun,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-2/p299-wong/p299-wong.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-2/p299-wong/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/22957.html",
  abstract =     "The Vector Space Model (VSM) has been adopted in
                 information retrieval as a means of coping with inexact
                 representation of documents and queries, and the
                 resulting difficulties in determining the relevance of
                 a document relative to a given query. The major problem
                 in employing this approach is that the explicit
                 representation of term vectors is not known a priori.
                 Consequently, earlier researchers made the assumption
                 that the vectors corresponding to terms are pairwise
                 orthogonal. Such an assumption is clearly unrealistic.
                 Although attempts have been made to compensate for this
                 assumption by some separate, corrective steps, such
                 methods are ad hoc and, in most cases, formally
                 inconsistent.\par

                 In this paper, a generalization of the VSM, called the
                 GVSM, is advanced. The developments provide a solution
                 not only for the computation of a measure of similarity
                 (correlation) between terms, but also for the
                 incorporation of these similarities into the retrieval
                 process.\par

                 The major strength of the GVSM derives from the fact
                 that it is theoretically sound and elegant.
                 Furthermore, experimental evaluation of the model on
                 several test collections indicates that the performance
                 is better than that of the VSM. Experiments have been
                 performed on some variations of the GVSM, and all these
                 results have also been compared to those of the VSM,
                 based on inverse document frequency weighting. These
                 results and some ideas for the efficient implementation
                 of the GVSM are discussed.",
  acknowledgement = ack-nhfb,
  affiliation =  "Univ of Regina, Regina, Sask, Can",
  affiliationaddress = "Univ of Regina, Regina, Sask, Can",
  annote =       "The space for both documents and queries is an
                 n-dimensional vector space. In GVSM, terms are not
                 assumed to be linearly independent; measure of
                 independence is based on the number of common
                 documents.",
  classification = "723; 903",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Experimentation; Languages; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "document representation; experimentation; generalized
                 vector space; Information Retrieval; information
                 retrieval systems --- Mathematical Models; information
                 science; languages; query representation, design;
                 theory",
  subject =      "{\bf H.3.3}: Information Systems, INFORMATION STORAGE
                 AND RETRIEVAL, Information Search and Retrieval,
                 Retrieval models. {\bf H.3.1}: Information Systems,
                 INFORMATION STORAGE AND RETRIEVAL, Content Analysis and
                 Indexing, Thesauruses. {\bf H.3.1}: Information
                 Systems, INFORMATION STORAGE AND RETRIEVAL, Content
                 Analysis and Indexing, Indexing methods. {\bf H.3.3}:
                 Information Systems, INFORMATION STORAGE AND RETRIEVAL,
                 Information Search and Retrieval.",
}

@Article{Rybinski:1987:FOL,
  author =       "Henryk Rybi{\'n}ski",
  title =        "On First-Order-Logic Databases",
  journal =      j-TODS,
  volume =       "12",
  number =       "3",
  pages =        "325--349",
  month =        sep,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15 (03B70)",
  MRnumber =     "88j:68033",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-3/p325-rybinski/p325-rybinski.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-3/p325-rybinski/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/27630.html",
  abstract =     "The use of first-order logic as database logic is
                 shown to be powerful enough for formalizing and
                 implementing not only relational but also hierarchical
                 and network-type databases. It enables one to treat all
                 the types of databases in a uniform manner. This paper
                 focuses on the database language for heterogeneous
                 databases. The language is shown to be general enough
                 to specify constraints for a particular type of
                 database, so that a specification of database type can
                 be ``translated'' to the specification given in the
                 database language, creating a ``logical environment''
                 for different views that can be defined by users. Owing
                 to the fact that any database schema is seen as a
                 first-order theory expressed by a finite set of
                 sentences, the problems concerned with completeness and
                 compactness of the database logic discussed by Jacobs
                 (``On Database Logic,'' {\em J. ACM 29\/}, 2 (Apr.
                 1982), 310--332) are avoided.",
  acknowledgement = ack-nhfb,
  annote =       "Successor of Jacobs's work. Language Ld can specify
                 constraints for any database type, which can then be
                 `translated' to a particular database domain.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Languages; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer metatheory --- formal logic; database logic;
                 design; first-order logic; hierarchical databases;
                 languages; network databases; relational databases;
                 theory, database systems",
  review =       "ACM Computing Reviews, Jan 1989",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Data
                 manipulation languages (DML). {\bf F.4.1}: Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Mathematical Logic. {\bf I.2.4}: Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Knowledge
                 Representation Formalisms and Methods, Predicate
                 logic.",
}

@Article{Stonebraker:1987:EDS,
  author =       "Michael Stonebraker and Jeff Anton and Eric Hanson",
  title =        "Extending a Database System with Procedures",
  journal =      j-TODS,
  volume =       "12",
  number =       "3",
  pages =        "350--376",
  month =        sep,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  note =         "Also published in/as: UCB/ERL memo M85/59, 1985.",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-3/p350-stonebraker/p350-stonebraker.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-3/p350-stonebraker/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/27631.html",
  abstract =     "This paper suggests that more powerful database
                 systems (DBMS) can be built by supporting database
                 procedures as full-fledged database objects. In
                 particular, allowing fields of a database to be a
                 collection of queries in the query language of the
                 system is shown to allow the natural expression of
                 complex data relationships. Moreover, many of the
                 features present in object-oriented systems and
                 semantic data models can be supported by this facility.
                 \par

                 In order to implement this construct, extensions to a
                 typical relational query language must be made, and
                 considerable work on the execution engine of the
                 underlying DBMS must be accomplished. This paper
                 reports on the extensions for one particular query
                 language and data manager and then gives performance
                 figures for a prototype implementation. Even though the
                 performance of the prototype is competitive with that
                 of a conventional system, suggestions for improvement
                 are presented.",
  acknowledgement = ack-nhfb,
  annote =       "Stored procedures follow DBTG suggestions from 1971.
                 The INGRES+ results were `competitive'.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "database procedures, design; database systems;
                 object-oriented systems; relational query language;
                 semantic data models",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.3}: Information
                 Systems, DATABASE MANAGEMENT, Languages, Query
                 languages. {\bf H.2.4}: Information Systems, DATABASE
                 MANAGEMENT, Systems, Query processing.",
}

@Article{Ozsoyoglu:1987:RMM,
  author =       "Z. Meral {\"O}zsoyo{\u{g}}lu and Li-Yan Yuan",
  title =        "Reduced {MVDs} and Minimal Covers",
  journal =      j-TODS,
  volume =       "12",
  number =       "3",
  pages =        "377--394",
  month =        sep,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "88h:68017",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/citations/journals/tods/1987-12-3/p377-ozsoyoolu/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/214286.html",
  abstract =     "Multivalued dependencies (MVDs) are data dependencies
                 that appear frequently in the ``real world'' and play
                 an important role in designing relational database
                 schemes. Given a set of MVDs to constrain a database
                 scheme, it is desirable to obtain an equivalent set of
                 MVDs that do not have any redundancies. In this paper
                 we define such a set of MVDs, called reduced MVDs, and
                 present an algorithm to obtain reduced MVDs. We also
                 define a minimal cover of a set of MVDs, which is a set
                 of reduced MVDs, and give an efficient method to find
                 such a minimal cover. The significance and properties
                 of reduced MVDs are also discussed in the context of
                 database design (e.g., 4NF decomposition) and
                 conflict-free MVDs.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Design; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "algorithms; computer programming --- algorithms;
                 database schema design; database systems; design;
                 minimal covers; multivalued dependencies; theory",
  subject =      "{\bf H.2.1}: Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.1}: Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Normal
                 forms.",
}

@Article{Faloutsos:1987:OSE,
  author =       "Christos Faloutsos and Stavros Christodoulakis",
  title =        "Optimal Signature Extraction and Information Loss",
  journal =      j-TODS,
  volume =       "12",
  number =       "3",
  pages =        "395--428",
  month =        sep,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-3/p395-faloutsos/p395-faloutsos.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-3/p395-faloutsos/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/214285.html",
  abstract =     "Signature files seem to be a promising access method
                 for text and attributes. According to this method, the
                 documents (or records) are stored sequentially in one
                 file (``text file''), while abstractions of the
                 documents (``signatures'') are stored sequentially in
                 another file (``signature file''). In order to resolve
                 a query, the signature file is scanned first, and many
                 nonqualifying documents are immediately rejected. We
                 develop a framework that includes primary key hashing,
                 multiattribute hashing, and signature files. Our effort
                 is to find the optimal signature extraction method.
                 \par

                 The main contribution of this paper is that we present
                 optimal and efficient suboptimal algorithms for
                 assigning words to signatures in several environments.
                 Another contribution is that we use information theory,
                 and study the relationship of the false drop
                 probability $ F_d $ and the information that is lost
                 during signature extraction. We give tight lower bounds
                 on the achievable $ F_d $ and show that a simple
                 relationship holds between the two quantities in the
                 case of optimal signature extraction with uniform
                 occurrence and query frequencies. We examine hashing as
                 a method to map words to signatures (instead of the
                 optimal way), and show that the same relationship holds
                 between $ F_d $ and {\em loss}, indicating that an
                 invariant may exist between these two quantities for
                 every signature extraction method.",
  acknowledgement = ack-nhfb,
  annote =       "superimposed coding",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Design; Performance",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "data processing; database systems; information loss;
                 optimal signature extraction; performance; signature
                 files, design",
  subject =      "{\bf H.2.2}: Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods. {\bf E.4}: Data,
                 CODING AND INFORMATION THEORY, Data compaction and
                 compression. {\bf E.5}: Data, FILES. {\bf H.3.2}:
                 Information Systems, INFORMATION STORAGE AND RETRIEVAL,
                 Information Storage, File organization.",
}

@Article{Ibaraki:1987:SC,
  author =       "Toshihide Ibaraki and Tiko Kameda and Toshimi
                 Minoura",
  title =        "Serializability with Constraints",
  journal =      j-TODS,
  volume =       "12",
  number =       "3",
  pages =        "429--452",
  month =        sep,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  MRclass =      "68P15",
  MRnumber =     "909 139",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-3/p429-ibaraki/p429-ibaraki.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-3/p429-ibaraki/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/214284.html",
  abstract =     "This paper deals with the serializability theory for
                 single-version and multiversion database systems. We
                 first introduce the concept of {\em disjoint-interval
                 topological sort\/} ({\em DITS}, for short) of an
                 arc-labeled directed acyclic graph. It is shown that a
                 history is serializable if and only if its {\em
                 transaction IO graph\/} has a DITS. We then define
                 several subclasses of serializable histories, based on
                 the constraints imposed by write-write, write-read,
                 read-write, or read-read conflicts, and investigate
                 inclusion relationships among them. In terms of DITS,
                 we give a sufficient condition for a class of
                 serializable histories to be polynomially recognizable,
                 which is then used to show that a new class of
                 histories, named WRW, can be recognized in polynomial
                 time. We also present NP-completeness results for the
                 problem of testing membership in some other classes.
                 \par

                 In the second half of this paper, we extend these
                 results to multiversion database systems. The inclusion
                 relationships among multiversion classes defined by
                 constraints, such as write-write and write-read, are
                 investigated. One such class coincides with class
                 DMVSR, introduced by Papadimitriou and Kanellakis, and
                 gives a simple characterization of this class. It is
                 shown that for most constraints, multiversion classes
                 properly contain the corresponding single-version
                 classes. Complexity results for the membership testing
                 are also discussed.",
  acknowledgement = ack-nhfb,
  annote =       "classification and properties of conflict graphs, with
                 and without versions.",
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "computer systems programming --- sorting; concurrency
                 control; database systems; disjoint-interval
                 topological sort, algorithms; serializability; theory",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing.",
}

@Article{Wolfson:1987:OLC,
  author =       "Ouri Wolfson",
  title =        "The Overhead of Locking (and Commit) Protocols in
                 Distributed Databases",
  journal =      j-TODS,
  volume =       "12",
  number =       "3",
  pages =        "453--471",
  month =        sep,
  year =         "1987",
  CODEN =        "ATDSD3",
  ISSN =         "0362-5915 (print), 1557-4644 (electronic)",
  ISSN-L =       "0362-5915",
  bibdate =      "Sat Apr 14 10:34:48 MDT 2001",
  bibsource =    "Compendex database; Database/Graefe.bib;
                 Database/Wiederhold.bib; http://www.acm.org/pubs/toc/;
                 http://www.math.utah.edu/pub/tex/bib/tods.bib",
  URL =          "http://www.acm.org/pubs/articles/journals/tods/1987-12-3/p453-wolfson/p453-wolfson.pdf;
                 http://www.acm.org/pubs/citations/journals/tods/1987-12-3/p453-wolfson/;
                 http://www.acm.org/pubs/toc/Abstracts/tods/28053.html",
  abstract =     "The main purpose of a locking protocol is to ensure
                 correct interleaving of actions executed by concurrent
                 transactions. The locking protocol consists of a set of
                 rules dictating how accessed entities should be locked
                 and unlocked. As a result of obeying the rules,
                 transactions in a distributed database incur an
                 overhead. We propose three measures of evaluating this
                 overhead, each most suitable to a different type of
                 underlying communication network. Then, using a graph
                 theoretic model, we analyze and compare three protocols
                 according to each measure: two-phase locking, two-phase
                 locking with a fixed order imposed on the database
                 entities (ensuring deadlock freedom), and the tree
                 protocol. In practice, a transaction also executes the
                 two-phase commit protocol in order to guarantee
                 atomicity. Therefore, the combined overhead of each
                 locking protocol and the two-phase commit protocol is
                 also determined.",
  acknowledgement = ack-nhfb,
  classification = "723",
  fjournal =     "ACM Transactions on Database Systems",
  generalterms = "Algorithms; Measurement; Performance; Theory",
  journal-URL =  "http://portal.acm.org/browse_dl.cfm?idx=J777",
  keywords =     "commit protocols; computer networks --- protocols;
                 concurrency control, algorithms; database systems;
                 locking protocols; measurement; message passing;
                 performance; theory",
  subject =      "{\bf H.2.4}: Information Systems, DATABASE MANAGEMENT,
                 Systems, Concurrency. {\bf C.4}: Computer Systems
                 Organization, PERFORMANCE OF SYSTEMS. {\bf C.2.4}:
                 Computer Systems Organization, COMPUTER-COMMUNICATION
                 NETWORKS, Distributed Systems, Distributed databases.
                 {\bf D.2.8}: Software, S