%%%
%%% -*-BibTeX-*-
%%% ====================================================================
%%%  BibTeX-file{
%%%     author          = "Nelson H. F. Beebe",
%%%     version         = "2.27",
%%%     date            = "09 November 2023",
%%%     time            = "09:31:48 MST",
%%%     filename        = "pods.bib",
%%%     address         = "University of Utah
%%%                        Department of Mathematics, 110 LCB
%%%                        155 S 1400 E RM 233
%%%                        Salt Lake City, UT 84112-0090
%%%                        USA",
%%%     telephone       = "+1 801 581 5254",
%%%     FAX             = "+1 801 581 4148",
%%%     URL             = "https://www.math.utah.edu/~beebe",
%%%     checksum        = "19294 53291 252158 2769241",
%%%     email           = "beebe at math.utah.edu, beebe at acm.org,
%%%                        beebe at computer.org (Internet)",
%%%     codetable       = "ISO/ASCII",
%%%     keywords        = "bibliography; database systems; Management of
%%%                        Data; Principles of Database Systems (PODS);
%%%                        SIGACT; SIGMOD",
%%%     license         = "public domain",
%%%     supported       = "yes",
%%%     docstring       = "This is a BibTeX bibliography for the ACM
%%%                        SIGACT-SIGMOD Symposia on Principles of
%%%                        Database Systems (PODS 'xx) (1982--date), and
%%%                        the ACM SIGMOD Conferences on Management of
%%%                        Data (SIGMOD 'xx) (1975--date).  These
%%%                        conferences are generally held together, and in
%%%                        several cases, the SIGMOD 'xx proceedings
%%%                        have been published as an issue of the
%%%                        journal SIGMOD Record.
%%%
%%%                        Version 1.00 of this bibliography covered only
%%%                        the PODS 'xx symposia; at version 2.00,
%%%                        entries for the SIGMOD 'xx conferences were
%%%                        added.
%%%
%%%                        The companion bibliography tods.bib covers
%%%                        the ACM Transactions on Database Systems, and
%%%                        the companion bibliography vldb.bib covers
%%%                        the International Conferences on Very Large
%%%                        Data Bases.
%%%
%%%                        The publisher maintains World Wide Web sites
%%%                        for these conference proceedings at
%%%
%%%                            http://www.sigmod.org/publications/literature
%%%                            http://www.sigmod.org/sigmod-pods-conferences
%%%                            http://www.sigmod.org/<YEAR>/
%%%
%%%                        with entries for 1985--date.  PDF files with
%%%                        full text of articles are available to
%%%                        qualified subscribers.  All of the papers
%%%                        listed at that Web site are included in this
%%%                        bibliography.
%%%
%%%                        Although all proceedings volumes from the
%%%                        first in 1982 onward are included here, the
%%%                        proceedings contents are not yet available
%%%                        for all years: most of the entries for
%%%                        1975--1984 are still missing.
%%%
%%%                        At version 2.27, the year coverage looked
%%%                        like this:
%%%
%%%                             1975 (   1)    1988 (  90)    2001 ( 115)
%%%                             1976 (   2)    1989 (  83)    2002 ( 110)
%%%                             1977 (   1)    1990 (  85)    2003 ( 117)
%%%                             1978 (   1)    1991 (  79)    2004 ( 119)
%%%                             1979 (   1)    1992 ( 131)    2005 (  36)
%%%                             1980 (   1)    1993 ( 119)    2006 (  40)
%%%                             1981 (   0)    1994 ( 111)    2007 (  32)
%%%                             1982 (   4)    1995 ( 135)    2008 (  32)
%%%                             1983 (   3)    1996 (  95)    2009 (  31)
%%%                             1984 (   6)    1997 (  96)    2010 (  32)
%%%                             1985 (  57)    1998 ( 122)    2011 (  29)
%%%                             1986 (  32)    1999 ( 122)    2012 (  31)
%%%                             1987 (  85)    2000 (  85)    2013 (  29)
%%%
%%%                             Article:          1
%%%                             InProceedings: 2238
%%%                             Proceedings:     61
%%%
%%%                             Total entries: 2300
%%%
%%%                        This bibliography was initially built from
%%%                        searches in the OCLC Content1st database.
%%%                        Additions were then made from all of the
%%%                        bibliographies in the TeX User Group
%%%                        collection, from bibliographies in the
%%%                        author's personal files, from the IEEE
%%%                        INSPEC CD-ROM database (1989--1995), from
%%%                        the Compendex database, from the American
%%%                        Mathematical Society MathSciNet database,
%%%                        and from the computer science bibliography
%%%                        collection on ftp.ira.uka.de in
%%%                        /pub/bibliography to which many people
%%%                        have contributed.  The snapshot of this
%%%                        collection was taken on 5-May-1994, and it
%%%                        consists of 441 BibTeX files, 2,672,675
%%%                        lines, 205,289 entries, and 6,375
%%%                        <at>String{} abbreviations, occupying
%%%                        94.8MB of disk space.
%%%
%%%                        Numerous errors in the sources noted above
%%%                        have been corrected.  Spelling has been
%%%                        verified with the UNIX spell and GNU ispell
%%%                        programs using the exception dictionary
%%%                        stored in the companion file with extension
%%%                        .sok.
%%%
%%%                        BibTeX citation tags are uniformly chosen as
%%%                        name:year:abbrev, where name is the family
%%%                        name of the first author or editor, year is a
%%%                        4-digit number, and abbrev is a 3-letter
%%%                        condensation of important title words.
%%%                        Citation labels were automatically generated
%%%                        by software developed for the BibNet Project.
%%%
%%%                        In this bibliography, entries are sorted in
%%%                        publication order, with the help of ``bibsort
%%%                        -byvolume''.  The bibsort utility is available
%%%                        from ftp.math.utah.edu in /pub/tex/bib.
%%%
%%%                        The checksum field above contains a CRC-16
%%%                        checksum as the first value, followed by the
%%%                        equivalent of the standard UNIX wc (word
%%%                        count) utility output of lines, words, and
%%%                        characters.  This is produced by Robert
%%%                        Solovay's checksum utility.",
%%%  }
%%% ====================================================================
%%% Preamble: an (empty, placeholder) \hyphenation exception list, plus
%%% a fallback definition of \TM (superscript trademark sign) for
%%% document classes/styles that do not already define it.
@Preamble{
    "\hyphenation{
    }"
    # "\ifx \undefined \TM \def \TM {${}^{\sc TM}$} \fi"
}

%%% ====================================================================
%%% Acknowledgement abbreviations:
@String{ack-nhfb = "Nelson H. F. Beebe,
                    University of Utah,
                    Department of Mathematics, 110 LCB,
                    155 S 1400 E RM 233,
                    Salt Lake City, UT 84112-0090, USA,
                    Tel: +1 801 581 5254,
                    FAX: +1 801 581 4148,
                    e-mail: \path|beebe@math.utah.edu|,
                            \path|beebe@acm.org|,
                            \path|beebe@computer.org| (Internet),
                    URL: \path|https://www.math.utah.edu/~beebe/|"}

%%% ====================================================================
%%% Journal abbreviations:
@String{j-SIGACT-SIGMOD-SYMP-PODS = "ACM SIGACT-SIGMOD Symposium on Principles
                                  of Database Systems"}

@String{j-SIGMOD                = "SIGMOD Record (ACM Special Interest Group
                                  on Management of Data)"}

%%% ====================================================================
%%% Publishers and their addresses:
@String{pub-ACM                 = "ACM Press"}

@String{pub-ACM:adr             = "New York, NY 10036, USA"}

@String{pub-AP                  = "Academic Press"}

@String{pub-AP:adr              = "New York, USA"}

@String{pub-WORLD-SCI           = "World Scientific Publishing Co."}

@String{pub-WORLD-SCI:adr       = "Singapore; Philadelphia, PA, USA; River
                                  Edge, NJ, USA"}

%%% ====================================================================
%%% Bibliography entries:
%%% Publisher name in the note corrected: ``Morgan Kaufmann'' (was
%%% misspelled ``Kaufman'').
@Article{Lin:1976:DRA,
  author =       "C. S. Lin and D. C. P. Smith and J. M. Smith",
  title =        "The Design of a Rotating Associative Array Memory for
                 a Relational Database Management Application",
  journal =      j-SIGACT-SIGMOD-SYMP-PODS,
  volume =       "1",
  number =       "1",
  pages =        "??--??",
  month =        mar,
  year =         "1976",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  note =         "Also published in/as: Proceedings of the First
                 Conference on Very Large Databases, Morgan Kaufmann
                 pubs. (Los Altos CA), Kerr (ed.), 1975, pp. 453--455.",
  annote =       "Data analysis in the file control unit.",
}

%%% PODS 1982 paper; proceedings data inherited via crossref
%%% ACM:1982:PPA; page range not yet known ("??--??").
@InProceedings{Kuck:1982:URD,
  author =       "S. M. Kuck and Y. Sagiv",
  title =        "A Universal Relation Database System Implemented Via
                 the Network Model",
  crossref =     "ACM:1982:PPA",
  pages =        "??--??",
  year =         "1982",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  annote =       "A Subset of a CODASYL implementation is used to
                 provide for universal relations. Lossless joins are
                 related to automatic, mandatory sets. Both schema
                 design and access path optimization is presented.",
}

%%% PODS 1983 paper; proceedings via crossref ACM:1983:PPS; page range
%%% not yet known ("??--??").
@InProceedings{Chandra:1983:HCF,
  author =       "A. K. Chandra and D. Harel",
  title =        "{Horn} clauses and the fixpoint query hierarchy",
  crossref =     "ACM:1983:PPS",
  pages =        "??--??",
  year =         "1983",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  annote =       "on the complexity of answering queries defined by
                 logical rules",
}

%%% PODS 1984 paper; proceedings via crossref ACM:1984:PPT.
@InProceedings{Cosmadakis:1984:FID,
  author =       "S. S. Cosmadakis and P. C. Kanellakis",
  title =        "Functional and Inclusion Dependencies: a
                 graph-theoretic Approach",
  crossref =     "ACM:1984:PPT",
  pages =        "??--??",
  year =         "1984",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  annote =       "Ownership.",
}

%%% NOTE(review): author surname corrected to ``Lehmann'' (Daniel
%%% Lehmann); the citation key is deliberately left unchanged for
%%% cross-reference stability.
@InProceedings{Lehman:1984:KCK,
  author =       "D. Lehmann",
  title =        "Knowledge, Common Knowledge, and Related Puzzles",
  crossref =     "ACM:1984:PPT",
  pages =        "??--??",
  year =         "1984",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

%%% NOTE(review): the first author's surname may be ``Pittelli''
%%% (F. M. Pittelli) -- verify against the PODS 1984 proceedings
%%% before changing the entry or its key.
@InProceedings{Pitelli:1984:BAU,
  author =       "F. Pitelli and H. Garc{\'\i}a-Molina and S. Davidson",
  title =        "Is {Byzantine} Agreement Useful in a Distributed
                 Database System",
  crossref =     "ACM:1984:PPT",
  pages =        "??--??",
  year =         "1984",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  note =         "Also published in/as: to appear in ACM Transactions on
                 Database Systems 1985.",
}

%%% PODS 1984 paper; proceedings via crossref ACM:1984:PPT.
@InProceedings{Stemple:1984:SVA,
  author =       "D. Stemple and T. Sheard",
  title =        "Specification and Verification of Abstract Database
                 Types",
  crossref =     "ACM:1984:PPT",
  pages =        "??--??",
  year =         "1984",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  annote =       "All integrity constraints are Schema declarations",
}

%%% Turkish accents restored in the author names using classic-BibTeX
%%% special-character form ({\"O}, {\u{g}}) so sorting and label
%%% generation treat each accented glyph as a single letter; the
%%% citation key stays ASCII and unchanged.
@InProceedings{Ozsoyoglu:1985:LPO,
  author =       "G{\"u}ltekin {\"O}zsoyo{\u{g}}lu and Z. Meral
                 {\"O}zsoyo{\u{g}}lu and Francisco Mata",
  title =        "A language and a physical organization technique for
                 summary tables",
  crossref =     "Navathe:1985:PAS",
  pages =        "3--16",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p3-ozsoyoglu/p3-ozsoyoglu.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p3-ozsoyoglu/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Roussopoulos:1985:DSS,
  author =       "Nick Roussopoulos and Daniel Leifker",
  title =        "Direct spatial search on pictorial databases using
                 packed {R}-trees",
  crossref =     "Navathe:1985:PAS",
  pages =        "17--31",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p17-roussopoulos/p17-roussopoulos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p17-roussopoulos/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Christodoulakis:1985:IAD,
  author =       "S. Christodoulakis",
  title =        "Issues in the architecture of a document archiver
                 using optical disk technology",
  crossref =     "Navathe:1985:PAS",
  pages =        "34--50",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p34-christodoulakis/p34-christodoulakis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p34-christodoulakis/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Barbic:1985:TMO,
  author =       "F. Barbic and B. Pernici",
  title =        "Time modeling in office information systems",
  crossref =     "Navathe:1985:PAS",
  pages =        "51--62",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p51-barbic/p51-barbic.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p51-barbic/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Faloutsos:1985:SFD,
  author =       "Chris Faloutsos",
  title =        "Signature files: design and performance comparison of
                 some signature extraction methods",
  crossref =     "Navathe:1985:PAS",
  pages =        "63--82",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p63-faloutsos/p63-faloutsos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p63-faloutsos/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Eick:1985:ATK,
  author =       "Christoph F. Eick and Peter C. Lockemann",
  title =        "Acquisition of terminological knowledge using database
                 design techniques",
  crossref =     "Navathe:1985:PAS",
  pages =        "84--94",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p84-eick/p84-eick.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p84-eick/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Shin:1985:PRD,
  author =       "D. G. Shin and K. B. Irani",
  title =        "Partitioning a relational database horizontally using
                 a knowledge-based approach",
  crossref =     "Navathe:1985:PAS",
  pages =        "95--105",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p95-shin/p95-shin.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p95-shin/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Agrawal:1985:MSC,
  author =       "Rakesh Agrawal and Michael J. Carey and Miron Livny",
  title =        "Models for studying concurrency control performance:
                 alternatives and implications",
  crossref =     "Navathe:1985:PAS",
  pages =        "108--121",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p108-agrawal/p108-agrawal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p108-agrawal/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Robinson:1985:FGP,
  author =       "John T. Robinson",
  title =        "A fast general-purpose hardware synchronization
                 mechanism",
  crossref =     "Navathe:1985:PAS",
  pages =        "122--130",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p122-robinson/p122-robinson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p122-robinson/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Agrawal:1985:RAM,
  author =       "Rakesh Agrawal and David J. DeWitt",
  title =        "Recovery architectures for multiprocessor database
                 machines",
  crossref =     "Navathe:1985:PAS",
  pages =        "131--145",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p131-agrawal/p131-agrawal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p131-agrawal/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Batini:1985:DDM,
  author =       "Carlo Batini and Stefano Ceri and Al Hershey and
                 George Gardarin and David Reiner",
  title =        "Database design: methodologies, tools, and
                 environments (panel session)",
  crossref =     "Navathe:1985:PAS",
  pages =        "148--150",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p148-batini/p148-batini.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p148-batini/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hsu:1985:ICM,
  author =       "Arding Hsu and Tomasz Imielinski",
  title =        "Integrity checking for multiple updates",
  crossref =     "Navathe:1985:PAS",
  pages =        "152--168",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/citations/proceedings/mod/318898/p152-hsu/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kung:1985:VDT,
  author =       "C. H. Kung",
  title =        "On verification of database temporal constraints",
  crossref =     "Navathe:1985:PAS",
  pages =        "169--179",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p169-kung/p169-kung.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p169-kung/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kuper:1985:EPL,
  author =       "Gabriel M. Kuper and Moshe Y. Vardi",
  title =        "On the expressive power of the logical data model:
                 preliminary report",
  crossref =     "Navathe:1985:PAS",
  pages =        "180--187",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p180-kuper/p180-kuper.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p180-kuper/",
  acknowledgement = ack-nhfb,
}

%%% Accent restored: Per-{\AA}ke Larson, written in classic-BibTeX
%%% special-character form so {\AA} sorts and labels as one letter.
@InProceedings{Larson:1985:EPH,
  author =       "Per-{\AA}ke Larson and M. V. Ramakrishna",
  title =        "External perfect hashing",
  crossref =     "Navathe:1985:PAS",
  pages =        "190--200",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p190-larson/p190-larson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p190-larson/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kawagoe:1985:MDH,
  author =       "Kyoji Kawagoe",
  title =        "Modified dynamic hashing",
  crossref =     "Navathe:1985:PAS",
  pages =        "201--213",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p201-kawagoe/p201-kawagoe.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p201-kawagoe/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Otoo:1985:MDH,
  author =       "Ekow J. Otoo",
  title =        "A multidimensional digital hashing scheme for files
                 with composite keys",
  crossref =     "Navathe:1985:PAS",
  pages =        "214--229",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p214-otoo/p214-otoo.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p214-otoo/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sibley:1985:PDM,
  author =       "Edgar H. Sibley and Matthias Jarke and Cecil S. McMinn
                 and John Murray and Randall Rustin and Ken Sloan",
  title =        "Pragmatics of database management (panel session)",
  crossref =     "Navathe:1985:PAS",
  pages =        "232--234",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p232-sibley/p232-sibley.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p232-sibley/",
  acknowledgement = ack-nhfb,
}

%%% Title corrected: the SIGMOD '85 paper by Snodgrass and Ahn is
%%% ``A taxonomy of time in databases'' (the word ``in'' was missing).
@InProceedings{Snodgrass:1985:TTD,
  author =       "Richard Snodgrass and Ilsoo Ahn",
  title =        "A taxonomy of time in databases",
  crossref =     "Navathe:1985:PAS",
  pages =        "236--246",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p236-snodgrass/p236-snodgrass.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p236-snodgrass/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Clifford:1985:AHR,
  author =       "James Clifford and Abdullah Uz Tansel",
  title =        "On an algebra for historical relational databases: two
                 views",
  crossref =     "Navathe:1985:PAS",
  pages =        "247--265",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p247-clifford/p247-clifford.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p247-clifford/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Copeland:1985:DSM,
  author =       "George P. Copeland and Setrag N. Khoshafian",
  title =        "A decomposition storage model",
  crossref =     "Navathe:1985:PAS",
  pages =        "268--279",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p268-copeland/p268-copeland.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p268-copeland/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Yu:1985:AIS,
  author =       "C. T. Yu and C. H. Chen",
  title =        "Adaptive information system design: one query at a
                 time",
  crossref =     "Navathe:1985:PAS",
  pages =        "280--290",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p280-yu/p280-yu.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p280-yu/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Beckley:1985:MRK,
  author =       "D. A. Beckley and M. W. Evens and V. K. Raman",
  title =        "Multikey retrieval from {K-d} trees and {QUAD-trees}",
  crossref =     "Navathe:1985:PAS",
  pages =        "291--301",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p291-beckley/p291-beckley.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p291-beckley/",
  acknowledgement = ack-nhfb,
}

%%% Given name corrected: ``Stavros'' (was misspelled ``Starvos'');
%%% consistent with S. Christodoulakis in entry
%%% Christodoulakis:1985:IAD earlier in this file.
@InProceedings{Christodoulakis:1985:MDM,
  author =       "Stavros Christodoulakis and D. Badal and A. Cardenas
                 and P. Mantey and F. Tompa and D. Tsichritzis",
  title =        "Multimedia database management (panel session)",
  crossref =     "Navathe:1985:PAS",
  pages =        "304--305",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p304-christodoulakis/p304-christodoulakis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p304-christodoulakis/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Fushimi:1985:APE,
  author =       "Shinya Fushimi and Masaru Kitsuregawa and Masaya
                 Nakayama and Hidehiko Tanaka and Tohru Moto-oka",
  title =        "Algorithm and performance evaluation of adaptive
                 multidimensional clustering technique",
  crossref =     "Navathe:1985:PAS",
  pages =        "308--318",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p308-fushimi/p308-fushimi.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p308-fushimi/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kamel:1985:MDD,
  author =       "Nabil Kamel and Roger King",
  title =        "A model of data distribution based on texture
                 analysis",
  crossref =     "Navathe:1985:PAS",
  pages =        "319--325",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p319-kamel/p319-kamel.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p319-kamel/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Goldman:1985:IIS,
  author =       "Kenneth J. Goldman and Sally A. Goldman and Paris C.
                 Kanellakis and Stanley B. Zdonik",
  title =        "{ISIS}: interface for a semantic information system",
  crossref =     "Navathe:1985:PAS",
  pages =        "328--342",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p328-goldman/p328-goldman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p328-goldman/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Vossen:1985:HLU,
  author =       "Gottfried Vossen and Volkert Brosda",
  title =        "A high-level user interface for update and retrieval
                 in relational databases--language aspects",
  crossref =     "Navathe:1985:PAS",
  pages =        "343--353",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p343-vossen/p343-vossen.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p343-vossen/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Demo:1985:ACD,
  author =       "G. Barbara Demo and Sukhamay Kundu",
  title =        "Analysis of the context dependency of {CODASYL}
                 find-statements with application to a database program
                 conversion",
  crossref =     "Navathe:1985:PAS",
  pages =        "354--361",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p354-demo/p354-demo.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p354-demo/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Acharya:1985:TRP,
  author =       "Shridhar Acharya and Gael Buckley",
  title =        "Transaction restarts in {Prolog} database systems",
  crossref =     "Navathe:1985:PAS",
  pages =        "364--373",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p364-acharya/p364-acharya.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p364-acharya/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Page:1985:GDD,
  author =       "Thomas W. Page and Matthew J. Weinstein and Gerald J.
                 Popek",
  title =        "Genesis: a distributed database operating system",
  crossref =     "Navathe:1985:PAS",
  pages =        "374--387",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p374-page/p374-page.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p374-page/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Klahold:1985:TMS,
  author =       "P. Klahold and G. Schlageter and R. Unland and W.
                 Wilkes",
  title =        "A transaction model supporting complex applications in
                 integrated information systems",
  crossref =     "Navathe:1985:PAS",
  pages =        "388--401",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/citations/proceedings/mod/318898/p388-klahold/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sinha:1985:TBC,
  author =       "Mukul K. Sinha and P. D. Nandikar and S. L.
                 Mehndiratta",
  title =        "Timestamp based certification schemes for transactions
                 in distributed database systems",
  crossref =     "Navathe:1985:PAS",
  pages =        "402--411",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p402-sinha/p402-sinha.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p402-sinha/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Kerschberg:1985:EDS,
  author =       "Larry Kerschberg and Michael Brodie and Charles
                 Kellogg and D. Stott Parker and Gio Wiederhold and
                 Carlo Zaniolo",
  title =        "Expert database systems (workshop review)",
  crossref =     "Navathe:1985:PAS",
  pages =        "414--417",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p414-kerschberg/p414-kerschberg.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p414-kerschberg/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bhargava:1985:RDD,
  author =       "Bharat Bhargava",
  title =        "Reliability in distributed database systems (panel
                 discussion)",
  crossref =     "Navathe:1985:PAS",
  pages =        "420--422",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p420-bhargava/p420-bhargava.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p420-bhargava/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Sellis:1985:OED,
  author =       "Timos K. Sellis and Leonard Shapiro",
  title =        "Optimization of extended database query languages",
  crossref =     "Navathe:1985:PAS",
  pages =        "424--436",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p424-sellis/p424-sellis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p424-sellis/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gray:1985:EPA,
  author =       "P. M. D. Gray",
  title =        "Efficient {Prolog} access to {CODASYL} and {FDM}
                 databases",
  crossref =     "Navathe:1985:PAS",
  pages =        "437--443",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p437-gray/p437-gray.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p437-gray/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Blain:1985:MPC,
  author =       "Tomas Blain and Michael Dohler and Ralph Michaelis and
                 Emran Qureshi",
  title =        "Managing the printed circuit board design process",
  crossref =     "Navathe:1985:PAS",
  pages =        "447--456",
  year =         "1985",
  bibdate =      "Wed Oct 25 08:47:38 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/318898/p447-blain/p447-blain.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/318898/p447-blain/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Abiteboul:1985:TIC,
  author =       "S. Abiteboul and V. Vianu",
  title =        "Transactions and Integrity Constraints",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Atzeni:1985:EQA,
  author =       "P. Atzeni and E. P. F. Chan",
  title =        "Efficient Query Answering in the Representative
                 Instance Approach",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Ausiello:1985:CPG,
  author =       "G. Ausiello and A. D'Atri",
  title =        "Chordality Properties on Graphs and Minimal Conceptual
                 Connections in Semantic Data Models",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Bancilhon:1985:AVP,
  author =       "F. Bancilhon and M. Spyratos",
  title =        "Algebraic Versus Probabilistic Independence in Data
                 Bases",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Casanova:1985:CLR,
  author =       "M. A. Casanova and A. V. Moura and L. Tucherman",
  title =        "On the Correctness of a Local Recovery Subsystem",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Cosmadakis:1985:PSR,
  author =       "S. S. Cosmadakis and P. C. Kanellakis and N.
                 Spyratos",
  title =        "Partition Semantics for Relations",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Garcia-Molina:1985:EEC,
  author =       "H. Garc{\'\i}a-Molina and J. Kent",
  title =        "An Experimental Evaluation of Crash Recovery
                 Mechanism",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Gyssens:1985:EJD,
  author =       "Marc Gyssens",
  title =        "Embedded Join Dependencies as a Tool for Decomposing
                 Full Join Dependencies",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Keller:1985:ATV,
  author =       "A. M. Keller",
  title =        "Algorithms for Translating View Updates to Database
                 Updates for Views Involving Selections, Projections,
                 and Joins",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Mannila:1985:SAR,
  author =       "H. Mannila and K.-J. R{\"a}ih{\"a}",
  title =        "Small {Armstrong} Relations for Database Design",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Page:1985:DDM,
  author =       "T. W. {Page, Jr.} and G. J. Popek",
  title =        "Distributed Data Management in Local Area Networks",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Sagiv:1985:COB,
  author =       "Yehoshua Sagiv",
  title =        "Concurrent Operations on {B}*-Trees with Overtaking",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Sagiv:1985:CRP,
  author =       "Yehoshua Sagiv",
  title =        "On Computing Restricted Projections of Representative
                 Instances",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Skeen:1985:EFT,
  author =       "D. Skeen and F. Cristian and A. {El Abbadi}",
  title =        "An Efficient Fault-Tolerant Algorithm for Replicated
                 Data Management",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Stein:1985:RUS,
  author =       "J. Stein and D. Maier",
  title =        "Relaxing the Universal Scheme Assumption",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Vardi:1985:QLD,
  author =       "Moshe Vardi",
  title =        "Querying Logical Databases",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Yannakakis:1985:CRC,
  author =       "Mihalis Yannakakis and C. H. Papadimitriou",
  title =        "The Complexity of Reliable Concurrency Control",
  crossref =     "ACM:1985:PPF",
  pages =        "??--??",
  month =        mar,
  year =         "1985",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Bancilhon:1986:MSO,
  author =       "Fran{\c{c}}ois Bancilhon and David Maier and Yehoshua
                 Sagiv and Jeffrey D. Ullman",
  title =        "Magic sets and other strange ways to implement logic
                 programs (extended abstract)",
  crossref =     "ACM:1986:PPF",
  pages =        "1--15",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p1-bancilhon/p1-bancilhon.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p1-bancilhon/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; languages; performance; theory",
  subject =      "{\bf I.2.2} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Automatic Programming, Program
                 transformation. {\bf I.2.3} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Deduction and Theorem Proving,
                 Logic programming. {\bf I.2.4} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Knowledge Representation
                 Formalisms and Methods, Representations (procedural and
                 rule-based). {\bf I.2.4} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Knowledge Representation
                 Formalisms and Methods, Predicate logic. {\bf I.2.5}
                 Computing Methodologies, ARTIFICIAL INTELLIGENCE,
                 Programming Languages and Software, Prolog. {\bf H.2.3}
                 Information Systems, DATABASE MANAGEMENT, Languages,
                 Query languages. {\bf H.2.4} Information Systems,
                 DATABASE MANAGEMENT, Systems, Query processing. {\bf
                 H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design.",
}

@InProceedings{Sacca:1986:ISC,
  author =       "Domenico Sacc{\`a} and Carlo Zaniolo",
  title =        "On the implementation of a simple class of logic
                 queries for databases",
  crossref =     "ACM:1986:PPF",
  pages =        "16--23",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p16-sacca/p16-sacca.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p16-sacca/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; languages; performance; theory",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf F.4.1} Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Mathematical Logic, Logic and constraint programming.
                 {\bf I.2.4} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Knowledge Representation Formalisms and
                 Methods, Predicate logic. {\bf E.1} Data, DATA
                 STRUCTURES, Graphs and networks. {\bf I.2.3} Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Answer/reason extraction. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design.",
}

@InProceedings{Afrati:1986:CSQ,
  author =       "Foto Afrati and Christos Papadimitriou and George
                 Papageorgiou and Athena Roussou and Yehoshua Sagiv and
                 Jeffrey D. Ullman",
  title =        "Convergence of sideways query evaluation",
  crossref =     "ACM:1986:PPF",
  pages =        "24--30",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p24-afrati/p24-afrati.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p24-afrati/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p24-afrati/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; languages; theory",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf H.2.1} Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Data
                 models. {\bf G.2.2} Mathematics of Computing, DISCRETE
                 MATHEMATICS, Graph Theory, Graph algorithms. {\bf
                 F.4.3} Theory of Computation, MATHEMATICAL LOGIC AND
                 FORMAL LANGUAGES, Formal Languages, Classes defined by
                 grammars or automata.",
}

@InProceedings{Weikum:1986:TFM,
  author =       "Gerhard Weikum",
  title =        "A theoretical foundation of multi-level concurrency
                 control",
  crossref =     "ACM:1986:PPF",
  pages =        "31--43",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p31-weikum/p31-weikum.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p31-weikum/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p31-weikum/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing. {\bf D.4.1} Software,
                 OPERATING SYSTEMS, Process Management, Concurrency.
                 {\bf D.4.1} Software, OPERATING SYSTEMS, Process
                 Management, Scheduling. {\bf D.4.1} Software, OPERATING
                 SYSTEMS, Process Management, Deadlocks.",
}

@InProceedings{Hadzilacos:1986:DCT,
  author =       "Thanasis Hadzilacos and Mihalis Yannakakis",
  title =        "Deleting completed transactions",
  crossref =     "ACM:1986:PPF",
  pages =        "43--46",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p43-hadzilacos/p43-hadzilacos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p43-hadzilacos/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p43-hadzilacos/",
  acknowledgement = ack-nhfb,
  keywords =     "design; theory; verification",
  subject =      "{\bf D.4.1} Software, OPERATING SYSTEMS, Process
                 Management, Concurrency. {\bf D.4.1} Software,
                 OPERATING SYSTEMS, Process Management, Deadlocks. {\bf
                 D.4.1} Software, OPERATING SYSTEMS, Process Management,
                 Scheduling. {\bf G.2.2} Mathematics of Computing,
                 DISCRETE MATHEMATICS, Graph Theory, Graph algorithms.
                 {\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models.",
}

@InProceedings{Su:1986:SNW,
  author =       "Jianwen Su",
  title =        "Safety of non-well-locked transaction systems",
  crossref =     "ACM:1986:PPF",
  pages =        "47--52",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p47-su/p47-su.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p47-su/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p47-su/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; performance; security; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing. {\bf D.4.1} Software,
                 OPERATING SYSTEMS, Process Management, Concurrency.
                 {\bf D.4.1} Software, OPERATING SYSTEMS, Process
                 Management, Deadlocks. {\bf D.4.1} Software, OPERATING
                 SYSTEMS, Process Management, Scheduling. {\bf H.2.2}
                 Information Systems, DATABASE MANAGEMENT, Physical
                 Design, Access methods. {\bf H.2.1} Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Data
                 models.",
}

@InProceedings{Bancilhon:1986:CCO,
  author =       "Fran{\c{c}}ois Bancilhon and Setrag Khoshafian",
  title =        "A calculus for complex objects",
  crossref =     "ACM:1986:PPF",
  pages =        "53--60",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p53-bancilhon/p53-bancilhon.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p53-bancilhon/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p53-bancilhon/",
  acknowledgement = ack-nhfb,
  keywords =     "design; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.1} Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Normal
                 forms. {\bf G.2.2} Mathematics of Computing, DISCRETE
                 MATHEMATICS, Graph Theory, Graph algorithms. {\bf
                 F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS
                 AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and
                 Problems, Computations on discrete structures. {\bf
                 F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS
                 AND PROBLEM COMPLEXITY, Numerical Algorithms and
                 Problems, Number-theoretic computations.",
}

@InProceedings{VanGucht:1986:SCM,
  author =       "Dirk {Van Gucht} and Patrick C. Fischer",
  title =        "Some classes of multilevel relational structures",
  crossref =     "ACM:1986:PPF",
  pages =        "60--69",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p60-van_gucht/p60-van_gucht.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p60-van_gucht/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p60-van_gucht/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.1} Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Normal
                 forms. {\bf H.2.1} Information Systems, DATABASE
                 MANAGEMENT, Logical Design, Schema and subschema. {\bf
                 F.2.1} Theory of Computation, ANALYSIS OF ALGORITHMS
                 AND PROBLEM COMPLEXITY, Numerical Algorithms and
                 Problems, Number-theoretic computations.",
}

@InProceedings{Gadia:1986:WTR,
  author =       "Shashi K. Gadia",
  title =        "Weak temporal relations",
  crossref =     "ACM:1986:PPF",
  pages =        "70--77",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p70-gadia/p70-gadia.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p70-gadia/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p70-gadia/",
  acknowledgement = ack-nhfb,
  keywords =     "design; languages; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf F.2.1} Theory of
                 Computation, ANALYSIS OF ALGORITHMS AND PROBLEM
                 COMPLEXITY, Numerical Algorithms and Problems,
                 Number-theoretic computations. {\bf D.3.1} Software,
                 PROGRAMMING LANGUAGES, Formal Definitions and Theory,
                 Semantics. {\bf H.2.4} Information Systems, DATABASE
                 MANAGEMENT, Systems, Query processing.",
}

@InProceedings{Olken:1986:RDM,
  author =       "Frank Olken and Doron Rotem",
  title =        "Rearranging data to maximize the efficiency of
                 compression",
  crossref =     "ACM:1986:PPF",
  pages =        "78--90",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p78-olken/p78-olken.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p78-olken/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p78-olken/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; economics; theory; verification",
  subject =      "{\bf E.4} Data, CODING AND INFORMATION THEORY, Data
                 compaction and compression. {\bf H.3.2} Information
                 Systems, INFORMATION STORAGE AND RETRIEVAL, Information
                 Storage, File organization. {\bf H.2.1} Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Data
                 models.",
}

@InProceedings{Robinson:1986:OPL,
  author =       "John T. Robinson",
  title =        "Order preserving linear hashing using dynamic key
                 statistics",
  crossref =     "ACM:1986:PPF",
  pages =        "91--99",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p91-robinson/p91-robinson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p91-robinson/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p91-robinson/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; experimentation; measurement;
                 performance; theory",
  subject =      "{\bf H.2.2} Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods. {\bf E.2} Data, DATA
                 STORAGE REPRESENTATIONS, Hash-table representations.
                 {\bf E.5} Data, FILES, Organization/structure. {\bf
                 D.4.3} Software, OPERATING SYSTEMS, File Systems
                 Management, Access methods.",
}

@InProceedings{Otoo:1986:BME,
  author =       "Ekow J. Otoo",
  title =        "Balanced multidimensional extendible hash tree",
  crossref =     "ACM:1986:PPF",
  pages =        "100--113",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p100-otoo/p100-otoo.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p100-otoo/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p100-otoo/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; experimentation; theory",
  subject =      "{\bf E.2} Data, DATA STORAGE REPRESENTATIONS,
                 Hash-table representations. {\bf E.1} Data, DATA
                 STRUCTURES, Trees. {\bf E.5} Data, FILES,
                 Organization/structure. {\bf E.1} Data, DATA
                 STRUCTURES, Arrays. {\bf H.2.1} Information Systems,
                 DATABASE MANAGEMENT, Logical Design, Schema and
                 subschema. {\bf H.2.7} Information Systems, DATABASE
                 MANAGEMENT, Database Administration, Data
                 dictionary/directory. {\bf H.2.4} Information Systems,
                 DATABASE MANAGEMENT, Systems, Query processing. {\bf
                 F.2.2} Theory of Computation, ANALYSIS OF ALGORITHMS
                 AND PROBLEM COMPLEXITY, Nonnumerical Algorithms and
                 Problems, Sorting and searching. {\bf D.4.3} Software,
                 OPERATING SYSTEMS, File Systems Management, Access
                 methods.",
}

@InProceedings{Naqvi:1986:NFF,
  author =       "Shamim A. Naqvi",
  title =        "Negation as failure for first-order queries",
  crossref =     "ACM:1986:PPF",
  pages =        "114--122",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p114-naqvi/p114-naqvi.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p114-naqvi/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p114-naqvi/",
  acknowledgement = ack-nhfb,
  keywords =     "economics; languages; performance; reliability;
                 theory",
  subject =      "{\bf I.2.5} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Programming Languages and Software,
                 Prolog. {\bf H.2.3} Information Systems, DATABASE
                 MANAGEMENT, Languages, Query languages. {\bf F.4.1}
                 Theory of Computation, MATHEMATICAL LOGIC AND FORMAL
                 LANGUAGES, Mathematical Logic, Logic and constraint
                 programming. {\bf I.2.4} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Knowledge Representation
                 Formalisms and Methods, Predicate logic. {\bf F.4.3}
                 Theory of Computation, MATHEMATICAL LOGIC AND FORMAL
                 LANGUAGES, Formal Languages, Classes defined by
                 grammars or automata.",
}

@InProceedings{Bidoit:1986:PVM,
  author =       "Nicole Bidoit and Richard Hull",
  title =        "Positivism vs. minimalism in deductive databases",
  crossref =     "ACM:1986:PPF",
  pages =        "123--132",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p123-bidoit/p123-bidoit.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p123-bidoit/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p123-bidoit/",
  acknowledgement = ack-nhfb,
  keywords =     "languages; performance; reliability; theory",
  subject =      "{\bf I.2.3} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Deduction and Theorem Proving, Deduction.
                 {\bf H.1.m} Information Systems, MODELS AND PRINCIPLES,
                 Miscellaneous. {\bf I.2.3} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Deduction and Theorem Proving,
                 Nonmonotonic reasoning and belief revision. {\bf I.2.4}
                 Computing Methodologies, ARTIFICIAL INTELLIGENCE,
                 Knowledge Representation Formalisms and Methods,
                 Predicate logic. {\bf D.3.1} Software, PROGRAMMING
                 LANGUAGES, Formal Definitions and Theory, Semantics.
                 {\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC
                 AND FORMAL LANGUAGES, Mathematical Logic, Logic and
                 constraint programming.",
}

@InProceedings{Gelfond:1986:ECW,
  author =       "M. Gelfond and H. Przymusi{\'n}ska and T. Przymusi{\'n}ski",
  title =        "The extended closed world assumption and its
                 relationship to parallel circumscription",
  crossref =     "ACM:1986:PPF",
  pages =        "133--139",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p133-gelfond/p133-gelfond.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p133-gelfond/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p133-gelfond/",
  acknowledgement = ack-nhfb,
  keywords =     "languages; performance; reliability; theory",
  subject =      "{\bf I.2.4} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Knowledge Representation Formalisms and
                 Methods, Predicate logic. {\bf I.2.3} Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Nonmonotonic reasoning and belief
                 revision. {\bf H.1.m} Information Systems, MODELS AND
                 PRINCIPLES, Miscellaneous. {\bf F.4.1} Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Mathematical Logic, Computational logic.",
}

@InProceedings{Chan:1986:PCC,
  author =       "E. P. F. Chan and Paolo Atzeni",
  title =        "On the properties and characterization of
                 connection-trap-free schemes",
  crossref =     "ACM:1986:PPF",
  pages =        "140--147",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p140-chan/p140-chan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p140-chan/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p140-chan/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; languages; performance; theory;
                 verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema. {\bf H.2.4}
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Query processing. {\bf H.3.3} Information Systems,
                 INFORMATION STORAGE AND RETRIEVAL, Information Search
                 and Retrieval, Retrieval models. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Data models.",
}

@InProceedings{Biskup:1986:OFA,
  author =       "H. Biskup and L. Schnetg{\"o}ke",
  title =        "One flavor assumption and gamma-acyclicity for
                 universal relation views",
  crossref =     "ACM:1986:PPF",
  pages =        "148--159",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p148-biskup/p148-biskup.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p148-biskup/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p148-biskup/",
  acknowledgement = ack-nhfb,
  keywords =     "design; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema. {\bf H.2.4}
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Query processing. {\bf H.2.1} Information Systems,
                 DATABASE MANAGEMENT, Logical Design, Data models.",
}

@InProceedings{Sagiv:1986:ESQ,
  author =       "Yehoshua Sagiv and Oded Shmueli",
  title =        "The equivalence of solving queries and producing tree
                 projections (extended abstract)",
  crossref =     "ACM:1986:PPF",
  pages =        "160--172",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p160-sagiv/p160-sagiv.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p160-sagiv/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p160-sagiv/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema. {\bf H.2.4}
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Query processing.",
}

@InProceedings{Sagiv:1986:FFA,
  author =       "Yehoshua Sagiv",
  title =        "On finite {FD}-acyclicity",
  crossref =     "ACM:1986:PPF",
  pages =        "173--182",
  year =         "1986",
  bibdate =      "Sun Nov 07 06:29:03 2004",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p173-sagiv/p173-sagiv.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p173-sagiv/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p173-sagiv/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema.",
}

@InProceedings{Ozsoyoglu:1986:UFM,
  author =       "Meral {\"O}zsoyo{\u{g}}lu and Li Yan Yuan",
  title =        "Unifying functional and multivalued dependencies for
                 relational database design",
  crossref =     "ACM:1986:PPF",
  pages =        "183--190",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p183-ozsoyoglu/p183-ozsoyoglu.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p183-ozsoyoglu/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p183-ozsoyoglu/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema.",
}

@InProceedings{Ruland:1986:AAD,
  author =       "Detlev Ruland and Dietmar Seipel",
  title =        "Alpha-acyclic decompositions of relational database
                 schemes",
  crossref =     "ACM:1986:PPF",
  pages =        "191--201",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p191-ruland/p191-ruland.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p191-ruland/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p191-ruland/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Normal forms. {\bf G.2.2} Mathematics of
                 Computing, DISCRETE MATHEMATICS, Graph Theory, Graph
                 algorithms.",
}

@InProceedings{Graham:1986:CTM,
  author =       "Marc H. Graham and Ke Wang",
  title =        "Constant time maintenance or the triumph of the {FD}",
  crossref =     "ACM:1986:PPF",
  pages =        "202--216",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p202-graham/p202-graham.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p202-graham/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p202-graham/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Normal forms.",
}

@InProceedings{Mannila:1986:TDR,
  author =       "Heikki Mannila and Kari-Jouko R{\"a}ih{\"a}",
  title =        "Test data for relational queries",
  crossref =     "ACM:1986:PPF",
  pages =        "217--223",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p217-mannila/p217-mannila.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p217-mannila/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p217-mannila/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; languages; theory; verification",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf H.3.3} Information
                 Systems, INFORMATION STORAGE AND RETRIEVAL, Information
                 Search and Retrieval, Query formulation. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Schema and subschema. {\bf D.2.5} Software,
                 SOFTWARE ENGINEERING, Testing and Debugging, Testing
                 tools (e.g., data generators, coverage testing).",
}

@InProceedings{Wilkins:1986:MTA,
  author =       "Marianne Winslett Wilkins",
  title =        "A model-theoretic approach to updating logical
                 databases",
  crossref =     "ACM:1986:PPF",
  pages =        "224--234",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p224-wilkins/p224-wilkins.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p224-wilkins/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p224-wilkins/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; economics; languages; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.2.3} Information
                 Systems, DATABASE MANAGEMENT, Languages, Data
                 manipulation languages (DML). {\bf I.2.3} Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Nonmonotonic reasoning and belief
                 revision. {\bf H.1.m} Information Systems, MODELS AND
                 PRINCIPLES, Miscellaneous. {\bf E.1} Data, DATA
                 STRUCTURES. {\bf D.3.1} Software, PROGRAMMING
                 LANGUAGES, Formal Definitions and Theory, Semantics.
                 {\bf I.2.4} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Knowledge Representation Formalisms and
                 Methods, Predicate logic.",
}

@InProceedings{Abiteboul:1986:DPT,
  author =       "Serge Abiteboul and Victor Vianu",
  title =        "Deciding properties of transactional schemas",
  crossref =     "ACM:1986:PPF",
  pages =        "235--239",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p235-abiteboul/p235-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p235-abiteboul/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p235-abiteboul/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; languages; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema. {\bf H.2.4}
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Transaction processing. {\bf H.2.1} Information
                 Systems, DATABASE MANAGEMENT, Logical Design, Data
                 models. {\bf H.1.m} Information Systems, MODELS AND
                 PRINCIPLES, Miscellaneous. {\bf F.3.1} Theory of
                 Computation, LOGICS AND MEANINGS OF PROGRAMS,
                 Specifying and Verifying and Reasoning about Programs,
                 Specification techniques.",
}

@InProceedings{ElAbbadi:1986:APR,
  author =       "Amr {El Abbadi} and Sam Toueg",
  title =        "Availability in partitioned replicated databases",
  crossref =     "ACM:1986:PPF",
  pages =        "240--251",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p240-el_abbadi/p240-el_abbadi.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p240-el_abbadi/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p240-el_abbadi/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; reliability; theory;
                 verification",
  subject =      "{\bf C.2.4} Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Distributed Systems,
                 Distributed databases. {\bf H.2.1} Information Systems,
                 DATABASE MANAGEMENT, Logical Design, Data models. {\bf
                 H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing. {\bf C.2.2} Computer
                 Systems Organization, COMPUTER-COMMUNICATION NETWORKS,
                 Network Protocols, Protocol architecture. {\bf D.4.1}
                 Software, OPERATING SYSTEMS, Process Management,
                 Concurrency. {\bf D.4.6} Software, OPERATING SYSTEMS,
                 Security and Protection, Access controls. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Data models. {\bf C.4} Computer Systems
                 Organization, PERFORMANCE OF SYSTEMS, Reliability,
                 availability, and serviceability.",
}

@InProceedings{Vardi:1986:IDI,
  author =       "Moshe Vardi",
  title =        "On the integrity of databases with incomplete
                 information",
  crossref =     "ACM:1986:PPF",
  pages =        "252--266",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p252-vardi/p252-vardi.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p252-vardi/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p252-vardi/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; languages; theory; verification",
  subject =      "{\bf H.2.0} Information Systems, DATABASE MANAGEMENT,
                 General, Security, integrity, and protection**. {\bf
                 H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf H.1.m} Information
                 Systems, MODELS AND PRINCIPLES, Miscellaneous. {\bf
                 I.2.3} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Deduction and Theorem Proving,
                 Nonmonotonic reasoning and belief revision. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Schema and subschema. {\bf F.1.3} Theory of
                 Computation, COMPUTATION BY ABSTRACT DEVICES,
                 Complexity Measures and Classes, Relations among
                 complexity classes.",
}

@InProceedings{Naughton:1986:DIR,
  author =       "Jeff Naughton",
  title =        "Data independent recursion in deductive databases",
  crossref =     "ACM:1986:PPF",
  pages =        "267--279",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p267-naughton/p267-naughton.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p267-naughton/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p267-naughton/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; languages; theory",
  subject =      "{\bf I.2.4} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Knowledge Representation Formalisms and
                 Methods, Predicate logic. {\bf I.2.3} Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Deduction. {\bf H.2.3} Information
                 Systems, DATABASE MANAGEMENT, Languages, Query
                 languages. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Recursive function theory. {\bf G.2.2}
                 Mathematics of Computing, DISCRETE MATHEMATICS, Graph
                 Theory, Graph algorithms.",
}

@InProceedings{Cosmadakis:1986:PER,
  author =       "S. Cosmadakis and P. Kanellakis",
  title =        "Parallel evaluation of recursive rule queries",
  crossref =     "ACM:1986:PPF",
  pages =        "280--293",
  year =         "1986",
  bibdate =      "Wed Oct 25 08:47:35 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/6012/p280-cosmadakis/p280-cosmadakis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/6012/p280-cosmadakis/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/6012/p280-cosmadakis/",
  acknowledgement = ack-nhfb,
  keywords =     "languages; theory; verification",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf H.2.1} Information
                 Systems, DATABASE MANAGEMENT, Logical Design. {\bf
                 D.2.8} Software, SOFTWARE ENGINEERING, Metrics,
                 Complexity measures. {\bf H.1.m} Information Systems,
                 MODELS AND PRINCIPLES, Miscellaneous. {\bf F.4.1}
                 Theory of Computation, MATHEMATICAL LOGIC AND FORMAL
                 LANGUAGES, Mathematical Logic, Recursive function
                 theory. {\bf F.1.3} Theory of Computation, COMPUTATION
                 BY ABSTRACT DEVICES, Complexity Measures and Classes,
                 Relations among complexity classes. {\bf G.1.0}
                 Mathematics of Computing, NUMERICAL ANALYSIS, General,
                 Parallel algorithms.",
}

@InProceedings{Abiteboul:1986:PTS,
  author =       "S. Abiteboul and V. Vianu",
  title =        "Properties of Transactional Schemas",
  crossref =     "ACM:1986:PPF",
  pages =        "235--239",
  month =        mar,
  year =         "1986",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  annote =       "a study of optimization for insert/delete
                 operations.",
}

@InProceedings{Neff:1987:DBC,
  author =       "R. K. Neff",
  title =        "Data bases, compound objects, and networked
                 workstations: {Beyond} distributed computing
                 {(Abstract)}",
  crossref =     "Dayal:1987:PAC",
  pages =        "1--1",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p1-neff/p1-neff.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p1-neff/",
  abstract =     "Requirements for future data base systems are
                 developed from the perspective of the user of a
                 networked workstation who naturally deals with compound
                 objects. Objects considered include full text,
                 diagrams, maps, sound recordings, images from film and
                 video and of art objects, spreadsheets, etc. Searching
                 requirements and strategies over multi-objects are also
                 considered. The context of such data base systems is
                 the library, in its electronic or digital version.
                 Comments are presented with respect to the digital
                 learning environment of the future. Current related
                 projects at Berkeley are described.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Human Factors; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Information Storage and
                 Retrieval --- General (H.3.0); Information Systems ---
                 Information Storage and Retrieval --- Systems and
                 Software (H.3.4): {\bf Information networks}; Hardware
                 --- Input/Output and Data Communications --- General
                 (B.4.0)",
}

@InProceedings{Ullman:1987:DTP,
  author =       "J. D. Ullman",
  title =        "Database theory --- past and future",
  crossref =     "ACM:1987:PPS",
  pages =        "1--10",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p1-ullman/p1-ullman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p1-ullman/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p1-ullman/",
  abstract =     "We briefly sketch the development of the various
                 branches of database theory. One important branch is
                 the theory of relational databases, including such
                 areas as dependency theory, universal-relation theory,
                 and hypergraph theory. A second important branch is the
                 theory of concurrency control and distributed
                 databases. Two other branches have not in the past been
                 given the attention they deserve. One of these is
                 ``logic and databases,'' and the second is
                 ``object-oriented database systems,'' which to my
                 thinking includes systems based on the network or
                 hierarchical data models. Both these areas are going to
                 be more influential in the future.",
  acknowledgement = ack-nhfb,
  generalterms = "Management; Theory",
  keywords =     "management; theory",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0)",
}

@InProceedings{Ingenthron:1987:TDR,
  author =       "Kurt Ingenthron",
  title =        "Thoughts on database research: a user perspective",
  crossref =     "Dayal:1987:PAC",
  pages =        "2--2",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p2-ingenthron/p2-ingenthron.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p2-ingenthron/",
  abstract =     "The future of computer aided design is in object
                 oriented programming. If the database community hopes
                 to participate in this future, it must reexamine some
                 basic assumptions about the architecture of database
                 systems. Database system functionality can be added to
                 object systems but if the performance cost is too high,
                 it will never survive. Below are some suggestions for
                 what can be done at a reasonable performance cost.
                 \par

                 The object oriented paradigm provides a more practical
                 approach to the partitioning of the global database
                 than horizontal and vertical partitioning of relational
                 tables. Each partition should itself be an independent
                 database containing related data such as the geometry
                 of a part or the spacial relationship of parts in an
                 assembly. A meta-database would be used to control
                 access to collections of these partitions. A collection
                 of partitions comprise the database for a user's design
                 session. \par

                 The overhead of traditional database transaction
                 management is not acceptable for high performance CAD
                 systems. With the partitioning scheme described above,
                 transaction management can be performed at a
                 partition/session granularity. Once the user has
                 composed the collection of partitions, he has a single
                 user database. There is no need for concurrency control
                 or transaction logging except at the meta-database
                 level. This type of transaction management can in fact
                 be more functional than traditional transaction
                 management, allowing for versioning, long transactions,
                 integrity checking and archival. \par

                 Object oriented databases need a message model, not a
                 data model. Any object which responds to the same
                 messages as an object of ``Duck'' class (walk and
                 quack) is, for all intents and purposes, a duck. An
                 attempt to design a data model based on instance
                 variables of an object or based on collections of
                 objects of like class violates the data abstraction
                 facilities of object oriented languages and diminishes
                 their power. An attempt to implement a relational
                 database system with an object oriented language yields
                 a relational database system where you get abstract
                 data types for free. It does not yield an object
                 oriented database system. \par

                 For object oriented queries, the message is the media.
                 A query can be transformed into an execution plan
                 consisting of messages sent to database objects.
                 Optimization decisions can be made by sending messages
                 to referenced objects. Collection classes can be
                 implemented for new access methods with cost and
                 selectivity methods to provide optimization
                 information. In this way, the query language can grow
                 with the application. \par

                 Data representation is an important aspect of object
                 oriented systems. Most object systems are typeless in
                 that all instance variables of an object are object
                 references. For performance sake, object systems should
                 provide enough of a type mechanism to allow simple data
                 items (integers, floats, characters, \ldots{}) to be
                 represented in the form intrinsic to the machine.
                 Methods can then be compiled for access to typed data.
                 \par

                 In conclusion, object systems provide enormous
                 potential for the development of CAD systems.
                 Performance influences the approach taken to an
                 application. WYSIWYG publishing applications were not
                 attempted until performance was adequate. Functionality
                 is what sells CAD systems. Database system
                 functionality can be added to object systems at a
                 reasonable cost.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Human Factors",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing}; Software
                 --- Operating Systems --- Communications Management
                 (D.4.4): {\bf Message sending}",
}

@InProceedings{Ioannidis:1987:QOS,
  author =       "Yannis E. Ioannidis and Eugene Wong",
  title =        "Query optimization by simulated annealing",
  crossref =     "Dayal:1987:PAC",
  pages =        "9--22",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p9-ioannidis/p9-ioannidis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p9-ioannidis/",
  abstract =     "Query optimizers of future database management systems
                 are likely to face large access plan spaces in their
                 task. Exhaustively searching such access plan spaces is
                 unacceptable. We propose a query optimization algorithm
                 based on {\em simulated annealing}, which is a
                 probabilistic hill climbing algorithm. We show the
                 specific formulation of the algorithm for the case of
                 optimizing complex non-recursive queries that arise in
                 the study of linear recursion. The query answer is
                 explicitly represented and manipulated within the {\em
                 closed semiring\/} of linear relational operators. The
                 optimization algorithm is applied to a state space that
                 is constructed from the equivalent algebraic forms of
                 the query answer. A prototype of the simulated
                 annealing algorithm has been built and few experiments
                 have been performed for a limited class of relational
                 operators. Our initial experience is that, in general,
                 the algorithm converges to processing strategies that
                 are very close to the optimal. Moreover, the
                 traditional processing strategies (e.g., the {\em
                 semi-naive evaluation\/}) have been found to be, in
                 general, suboptimal.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Query formulation}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Information Systems --- Database
                 Management --- Physical Design (H.2.2): {\bf Access
                 methods}",
}

@InProceedings{Kuper:1987:LPS,
  author =       "G. M. Kuper",
  title =        "Logic programming with sets",
  crossref =     "ACM:1987:PPS",
  pages =        "11--20",
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p11-kuper/p11-kuper.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p11-kuper/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p11-kuper/",
  acknowledgement = ack-nhfb,
  keywords =     "languages; management; theory; verification",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf H.2.1} Information Systems,
                 DATABASE MANAGEMENT, Logical Design. {\bf F.4.1} Theory
                 of Computation, MATHEMATICAL LOGIC AND FORMAL
                 LANGUAGES, Mathematical Logic, Logic and constraint
                 programming. {\bf I.2.3} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Deduction and Theorem Proving,
                 Logic programming. {\bf F.4.3} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Formal
                 Languages, Algebraic language theory.",
}

@InProceedings{Beeri:1987:SNL,
  author =       "C. Beeri and S. Naqvi and R. Ramakrishnan and O.
                 Shmueli and S. Tsur",
  title =        "Sets and negation in a logic data base language
                 {(LDL1)}",
  crossref =     "ACM:1987:PPS",
  pages =        "21--37",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p21-beeri/p21-beeri.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p21-beeri/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p21-beeri/",
  abstract =     "In this paper we extend LDL, a Logic Based Database
                 Language, to include finite sets and negation. The new
                 language is called LDL1. We define the notion of a
                 model and show that a negation-free program need not
                 have a model, and that it may have more than one
                 minimal model. We impose syntactic restriction in order
                 to define a deterministic language. These restrictions
                 allow only layered (stratified) programs. We prove that
                 for any program satisfying the syntactic restrictions
                 of layering, there is a minimal model, and that this
                 model can be constructed in a bottom-up fashion.
                 Extensions to the basic grouping mechanism are
                 proposed. We show that these extensions can be
                 translated into equivalent LDL1 programs. Finally, we
                 show how the technique of magic sets can be extended to
                 translate LDL1 programs into equivalent programs which
                 can often be executed more efficiently.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Management; Theory; Verification",
  keywords =     "languages; management; theory; verification",
  subject =      "{\bf D.3.2} Software, PROGRAMMING LANGUAGES, Language
                 Classifications. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Logic and constraint programming. {\bf I.2.3}
                 Computing Methodologies, ARTIFICIAL INTELLIGENCE,
                 Deduction and Theorem Proving, Logic programming. {\bf
                 D.3.1} Software, PROGRAMMING LANGUAGES, Formal
                 Definitions and Theory.",
}

@InProceedings{Ganski:1987:ONS,
  author =       "Richard A. Ganski and Harry K. T. Wong",
  title =        "Optimization of nested {SQL} queries revisited",
  crossref =     "Dayal:1987:PAC",
  pages =        "23--33",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p23-ganski/p23-ganski.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p23-ganski/",
  abstract =     "Current methods of evaluating nested queries in the
                 SQL language can be inefficient in a variety of query
                 and data base contexts. Previous research in the area
                 of nested query optimization which sought methods of
                 reducing evaluation costs is summarized, including a
                 classification scheme for nested queries, algorithms
                 designed to transform each type of query to a logically
                 equivalent form which may then be evaluated more
                 efficiently, and a description of a major bug in one of
                 these algorithms. Further examination reveals another
                 bug in the same algorithm. Solutions to these bugs are
                 proposed and incorporated into a new transformation
                 algorithm, and extensions are proposed which will allow
                 the transformation algorithms to handle a larger class
                 of predicates. A recursive algorithm for processing a
                 general nested query is presented and the action of
                 this algorithm is demonstrated. This algorithm can be
                 used to transform any nested query.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf SQL}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Query formulation}",
}

@InProceedings{Abiteboul:1987:RQS,
  author =       "Serge Abiteboul and Paris Kanellakis and G{\"o}sta
                 Grahne",
  title =        "On the representation and querying of sets of possible
                 worlds",
  crossref =     "Dayal:1987:PAC",
  pages =        "34--48",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p34-abiteboul/p34-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p34-abiteboul/",
  abstract =     "We represent a {\em set of possible worlds\/} using an
                 incomplete information database. The representation
                 techniques that we study form a hierarchy, which
                 generalizes relations of constants. This hierarchy
                 ranges from the very simple Codd-table, (i.e., a
                 relation of constants and distinct variables called
                 nulls, which stand for values present but unknown), to
                 much more complex mechanisms involving views on
                 conditioned-tables, (i.e., queries on Codd-tables
                 together with conditions). The views we consider are
                 the queries that have polynomial data-complexity on
                 complete information databases. Our conditions are
                 conjunctions of equalities and inequalities. \par

                 (1) We provide matching upper and lower bounds on the
                 data-complexity of testing {\em containment}, {\em
                 membership}, and {\em uniqueness\/} for sets of
                 possible worlds and we fully classify these problems
                 with respect to our representation hierarchy. The most
                 surprising result in this classification is that it is
                 complete in $\Pi_2^p$, whether a set of possible worlds
                 represented by a Codd-table is a subset of a set of
                 possible worlds represented by a Codd-table with one
                 conjunction of inequalities. \par

                 (2) We investigate the data-complexity of querying
                 incomplete information databases. We examine both
                 asking for {\em certain facts\/} and for {\em possible
                 facts}. Our approach is algebraic but our bounds also
                 apply to logical databases. We show that asking for a
                 certain fact is coNP-complete, even for a fixed first
                 order query on a Codd-table. We thus strengthen a lower
                 bound of [16], who showed that this holds for a
                 Codd-table with a conjunction of inequalities. For each
                 fixed positive existential query we present a
                 polynomial algorithm solving the bounded possible fact
                 problem of this query on conditioned-tables. We show
                 that our approach is, in a sense, the best possible, by
                 deriving two NP-completeness lower bounds for the
                 bounded possible fact problem when the fixed query
                 contains either negation or recursion.",
  acknowledgement = ack-nhfb,
  generalterms = "Theory",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3): {\bf Uncertainty,
                 ``fuzzy,'' and probabilistic reasoning}; Computing
                 Methodologies --- Artificial Intelligence --- Knowledge
                 Representation Formalisms and Methods (I.2.4): {\bf
                 Relation systems}",
}

@InProceedings{Yuan:1987:LDR,
  author =       "L. Y. Yuan and Z. M. Ozsoyoglu",
  title =        "Logical design of relational database schemes",
  crossref =     "ACM:1987:PPS",
  pages =        "38--47",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p38-yuan/p38-yuan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p38-yuan/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p38-yuan/",
  abstract =     "We define extended conflict free dependencies in the
                 context of functional and multivalued dependencies, and
                 prove that there exists an acyclic, dependency
                 preserving, 4NF database scheme if and only if the
                 given set of dependencies has an extended conflict free
                 cover. This condition can be checked in polynomial
                 time. A polynomial time algorithm to obtain such a
                 scheme for a given extended conflict free set of
                 dependencies is also presented. The result is also
                 applicable when the data dependencies consist of only
                 functional dependencies, giving the necessary and
                 sufficient condition for an acyclic, dependency
                 preserving BCNF database scheme.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Management; Theory; Verification",
  keywords =     "algorithms; design; management; theory; verification",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf H.2.1} Information Systems,
                 DATABASE MANAGEMENT, Logical Design, Schema and
                 subschema.",
}

@InProceedings{Chan:1987:DDS,
  author =       "E. P. F. Chan and H. J. Hernandez",
  title =        "On designing database schemes bounded or constant-time
                 maintainable with respect to functional dependencies",
  crossref =     "ACM:1987:PPS",
  pages =        "48--57",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p48-chan/p48-chan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p48-chan/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p48-chan/",
  abstract =     "Under the weak instance model, to determine if a class
                 of database schemes is bounded with respect to
                 dependencies is fundamental for the analysis of the
                 behavior of the class of database schemes with respect
                 to query processing and updates. However, proving that
                 a class of database schemes is bounded with respect to
                 dependencies seems to be very difficult even for
                 restricted cases. To resolve this problem, we need to
                 develop techniques for characterizing bounded database
                 schemes. \par

                 In this paper, we give a formal methodology for
                 designing database schemes bounded with respect to
                 functional dependencies using a new technique called
                 extensibility. This methodology can also be used to
                 design constant-time-maintainable database schemes.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management; Theory; Verification",
  keywords =     "design; management; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema.",
}

@InProceedings{Sacca:1987:MCM,
  author =       "Domenico Sacc{\`a} and Carlo Zaniolo",
  title =        "Magic counting methods",
  crossref =     "Dayal:1987:PAC",
  pages =        "49--59",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p49-sacca/p49-sacca.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p49-sacca/",
  abstract =     "{\em The problem considered is that of implementing
                 recursive queries, expressed in a logic-based language,
                 by efficient fixpoint computations. In particular, the
                 situation is studied where the initial bindings in the
                 recursive predicate can be used to restrict the search
                 space and ensure safety of execution. Two key
                 techniques previously proposed to solve this problem
                 are (i) the highly efficient counting method, and (ii)
                 the magic set method which is safe in a wider range of
                 situations than (i). In this paper, we present a family
                 of methods, called the magic counting methods, that
                 combines the advantages of (i) and (ii). This is made
                 possible by the similarity of the strategies used by
                 the counting method and the magic set method for
                 propagating the bindings. This paper introduces these
                 new methods, examines their computational complexity,
                 and illustrates the trade-offs between the family
                 members and their superiority with respect to the old
                 methods}.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3); Theory of Computation ---
                 Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Recursive function
                 theory}; Theory of Computation --- Mathematical Logic
                 and Formal Languages --- Grammars and Other Rewriting
                 Systems (F.4.2); Theory of Computation --- Analysis of
                 Algorithms and Problem Complexity --- Numerical
                 Algorithms and Problems (F.2.1): {\bf Number-theoretic
                 computations}",
}

@InProceedings{Gottlob:1987:CCE,
  author =       "G. Gottlob",
  title =        "Computing covers for embedded functional
                 dependencies",
  crossref =     "ACM:1987:PPS",
  pages =        "58--69",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p58-gottlob/p58-gottlob.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p58-gottlob/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p58-gottlob/",
  abstract =     "This paper deals with the problem of computing covers
                 for the functional dependencies embedded in a subset of
                 a given relation schema. We show how this problem can
                 be simplified and present a new and efficient algorithm
                 ``Reduction. By Resolution'' (RBR) for its solution.
                 Though the problem of computing covers for embedded
                 dependencies is inherently exponential, our algorithm
                 behaves polynomially for several classes of inputs. RBR
                 can be used for the solution of some related problems
                 in the theory of database design, such as deciding
                 whether a given database scheme is in Boyce-Codd Normal
                 Form or decomposing a scheme into Boyce-Codd Normal
                 Form.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Management; Theory; Verification",
  keywords =     "algorithms; design; management; theory; verification",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema.",
}

@InProceedings{Aly:1987:NDM,
  author =       "Hussien Aly and Z. Meral Ozsoyoglu",
  title =        "Non-deterministic modelling of logical queries in
                 deductive databases",
  crossref =     "Dayal:1987:PAC",
  pages =        "60--72",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p60-aly/p60-aly.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p60-aly/",
  abstract =     "We propose a technique based on Petri Nets formalism
                 to model logic queries in deductive databases. The
                 model is called PNLP (Petri Net model for Logic
                 Programs), and it has a simple formal description and a
                 graphical representation. The PNLP model explicitly
                 represents the relationships between rules and
                 predicates. It is general and flexible enough to
                 demonstrate the flow of control in different algorithms
                 used to evaluate recursive logic queries. In fact the
                 model unifies the level of description of these
                 algorithms, and facilitates identifying similarities
                 and differences between them. The inherent
                 non-determinism in the PNLP model may also be useful in
                 recognizing the parallelism within Horn-clause logic
                 programs. In this paper, the PNLP model is described,
                 and its functionality is demonstrated by modeling
                 several existing algorithms for recursive query
                 evaluation.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Theory of
                 Computation --- Mathematical Logic and Formal Languages
                 --- Mathematical Logic (F.4.1): {\bf Logic and
                 constraint programming}; Mathematics of Computing ---
                 Discrete Mathematics --- Graph Theory (G.2.2): {\bf
                 Network problems}; Theory of Computation ---
                 Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Recursive function
                 theory}",
}

@InProceedings{DAtri:1987:DQI,
  author =       "A. D'Atri and P. {Di Felice} and M. Moscarini",
  title =        "Dynamic query interpretation in relational databases",
  crossref =     "ACM:1987:PPS",
  pages =        "70--78",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p70-d_atri/p70-d_atri.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p70-d_atri/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p70-d_atri/",
  abstract =     "A new dynamic approach to the problem of determining
                 the correct interpretation of a logically independent
                 query to a relational database is described. The
                 proposed disambiguating process is based on a simple
                 user-system dialogue that consists in a sequence of
                 decisions about the relevance (or not) of an attribute
                 with respect to the user interpretation.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management; Theory",
  keywords =     "design; management; theory",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf H.2.4} Information Systems,
                 DATABASE MANAGEMENT, Systems, Query processing. {\bf
                 H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Schema and subschema.",
}

@InProceedings{Han:1987:HRP,
  author =       "Jiawei Han and Lawrence J. Henschen",
  title =        "Handling redundancy in the processing of recursive
                 database queries",
  crossref =     "Dayal:1987:PAC",
  pages =        "73--81",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p73-han/p73-han.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p73-han/",
  abstract =     "Redundancy may exist in the processing of recursive
                 database queries at four different levels:
                 precompilation level, iteration level, tuple processing
                 level and file accessing level. Techniques for reducing
                 redundant work at each level are studied. In the
                 precompilation level, the optimization techniques
                 include removing redundant parts in a rule cluster,
                 simplifying recursive clusters and sharing common
                 subexpressions among rules. At the iteration level, the
                 techniques discussed are the use of frontier relations
                 and the counting method. At the tuple processing level,
                 we use merging and filtering methods to exclude
                 processed drivers from database reaccessing. Finally,
                 at the file accessing level, I/O cost can be further
                 reduced by level relaxation. We conclude that even for
                 complex recursion, redundant database processing can be
                 considerably reduced or eliminated by developing
                 appropriate algorithms.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Theory of
                 Computation --- Mathematical Logic and Formal Languages
                 --- Mathematical Logic (F.4.1): {\bf Recursive function
                 theory}",
}

@InProceedings{Atzeni:1987:NBW,
  author =       "P. Atzeni and M. C. {De Bernardis}",
  title =        "A new basis for the weak instance model",
  crossref =     "ACM:1987:PPS",
  pages =        "79--86",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p79-atzeni/p79-atzeni.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p79-atzeni/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p79-atzeni/",
  abstract =     "A new definition of the weak instance model is
                 presented, which does not consider the missing values
                 as existent though unknown, but just assumes that no
                 information is available about them. It is possible to
                 associate with the new definition logical theories that
                 do not contain universally quantified variables. The
                 new model enjoys various desirable properties of the
                 old weak instance model, with respect to dependency
                 satisfaction, query answering, and associated logical
                 theories.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management; Theory",
  keywords =     "design; management; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models.",
}

@InProceedings{Daniels:1987:DLT,
  author =       "Dean S. Daniels and Alfred Z. Spector and Dean S.
                 Thompson",
  title =        "Distributed logging for transaction processing",
  crossref =     "Dayal:1987:PAC",
  pages =        "82--96",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p82-daniels/p82-daniels.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p82-daniels/",
  abstract =     "Increased interest in using workstations and small
                 processors for distributed transaction processing
                 raises the question of how to implement the logs needed
                 for transaction recovery. Although logs can be
                 implemented with data written to duplexed disks on each
                 processing node, this paper argues there are advantages
                 if log data is written to multiple {\em log server\/}
                 nodes. A simple analysis of expected logging loads
                 leads to the conclusion that a high performance,
                 microprocessor based processing node can support a log
                 server if it uses efficient communication protocols and
                 low latency, non volatile storage to buffer log data.
                 The buffer is needed to reduce the processing time per
                 log record and to increase throughput to the logging
                 disk. An interface to the log servers using simple,
                 robust, and efficient protocols is presented. Also
                 described are the disk data structures that the log
                 servers use. This paper concludes with a brief
                 discussion of remaining design issues, the status of a
                 prototype implementation, and plans for its
                 completion.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Database Administration (H.2.7): {\bf
                 Logging and recovery}",
}

@InProceedings{Malvestuto:1987:AQC,
  author =       "F. M. Malvestuto",
  title =        "Answering queries in categorical databases",
  crossref =     "ACM:1987:PPS",
  pages =        "87--96",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p87-malvestuto/p87-malvestuto.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p87-malvestuto/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p87-malvestuto/",
  abstract =     "A compatible categorical data base can be viewed as a
                 single (contingency) table by taking the {\em
                 maximum-entropy\/} extension of the component tables.
                 Such a view, here called {\em universal table model,\/}
                 is needed to answer a user who wishes
                 ``cross-classified'' categorical data, that is,
                 categorical data resulting from the combination of the
                 information contents of two or more base tables. In
                 order to implement a {\em universal table interface\/}
                 we make use of a query-optimization procedure, which is
                 able to generate an appropriate answer both in the case
                 that the asked data are present in the data base and in
                 the case that they are not and, then, have to be
                 estimated.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management; Theory; Verification",
  keywords =     "design; management; theory; verification",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf H.2.m} Information
                 Systems, DATABASE MANAGEMENT, Miscellaneous.",
}

@InProceedings{Herman:1987:DAV,
  author =       "Gary Herman and K. C. Lee and Abel Weinrib",
  title =        "The {Datacycle} architecture for very high throughput
                 database systems",
  crossref =     "Dayal:1987:PAC",
  pages =        "97--103",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p97-herman/p97-herman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p97-herman/",
  abstract =     "{\em The evolutionary trend toward a database-driven
                 public communications network has motivated research
                 into database architectures capable of executing
                 thousands of transactions per second. In this paper we
                 introduce the Datacycle architecture, an attempt to
                 exploit the enormous transmission bandwidth of optical
                 systems to permit the implementation of high throughput
                 multiprocessor database systems. The architecture has
                 the potential for unlimited query throughput,
                 simplified data management, rapid execution of complex
                 queries, and efficient concurrency control. We describe
                 the logical operation of the architecture and discuss
                 implementation issues in the context of a prototype
                 system currently under construction}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Computer Systems Organization ---
                 Computer-Communication Networks --- Network
                 Architecture and Design (C.2.1): {\bf Network
                 communications}; Computer Systems Organization ---
                 Computer-Communication Networks --- Distributed Systems
                 (C.2.4): {\bf Network operating systems}; Information
                 Systems --- Database Management --- General (H.2.0)",
}

@InProceedings{Fekete:1987:NTR,
  author =       "A. Fekete and N. Lynch and M. Merritt and W. Weihl",
  title =        "Nested transactions and read-write locking",
  crossref =     "ACM:1987:PPS",
  pages =        "97--111",
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p97-fekete/p97-fekete.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p97-fekete/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p97-fekete/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; management; verification",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Transaction processing.",
}

@InProceedings{Lehman:1987:RAH,
  author =       "Tobin J. Lehman and Michael J. Carey",
  title =        "A recovery algorithm for a high-performance
                 memory-resident database system",
  crossref =     "Dayal:1987:PAC",
  pages =        "104--117",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p104-lehman/p104-lehman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p104-lehman/",
  abstract =     "With memory prices dropping and memory sizes
                 increasing accordingly, a number of researchers are
                 addressing the problem of designing high-performance
                 database systems for managing memory-resident data. In
                 this paper we address the recovery problem in the
                 context of such a system. We argue that existing
                 database recovery schemes fall short of meeting the
                 requirements of such a system, and we present a new
                 recovery mechanism which is designed to overcome their
                 shortcomings. The proposed mechanism takes advantage of
                 a few megabytes of reliable memory in order to organize
                 recovery information on a per ``object'' basis. As a
                 result, it is able to amortize the cost of checkpoints
                 over a controllable number of updates, and it is also
                 able to separate post-crash recovery into two
                 phases--high-speed recovery of data which is needed
                 immediately by transactions, and background recovery of
                 the remaining portions of the database. A simple
                 performance analysis is undertaken, and the results
                 suggest our mechanism should perform well in a
                 high-performance, memory-resident database
                 environment.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2); Computer Systems Organization
                 --- Performance of Systems (C.4); Information Systems
                 --- Database Management --- Systems (H.2.4)",
}

@InProceedings{Segall:1987:TCM,
  author =       "A. Segall and O. Wolfson",
  title =        "Transaction commitment at minimal communication cost",
  crossref =     "ACM:1987:PPS",
  pages =        "112--118",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p112-segall/p112-segall.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p112-segall/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p112-segall/",
  abstract =     "We consider the communication protocol for transaction
                 commitment in a distributed database. Specifically, the
                 connection between the structure of communication among
                 the participating sites, and the communication network
                 topology is investigated. In order to do so, the cost
                 of transaction commitment is defined as the number of
                 network hops that messages of the protocol must
                 traverse. We establish the necessary cost for
                 transaction commitment, and show that it is also
                 sufficient. A simple distributed algorithm is presented
                 to prove sufficiency. Our algorithm is also
                 time-efficient, and in order to prove that we show that
                 the timing of our algorithm is optimal within a natural
                 class of commit-protocols.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Management; Standardization;
                 Theory; Verification",
  keywords =     "algorithms; design; management; standardization;
                 theory; verification",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2); Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Distributed databases};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Trees}",
}

@InProceedings{Nixon:1987:ICS,
  author =       "Brian Nixon and Lawrence Chung and John Mylopoulos and
                 David Lauzon and Alex Borgida and M. Stanley",
  title =        "Implementation of a compiler for a semantic data
                 model: {Experiences} with {Taxis}",
  crossref =     "Dayal:1987:PAC",
  pages =        "118--131",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p118-nixon/p118-nixon.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p118-nixon/",
  abstract =     "The features of a compiler for the Taxis design
                 language are described and discussed. Taxis offers an
                 entity-based framework for designing interactive
                 information systems and supports generalisation,
                 classification and aggregation as abstraction
                 mechanisms. Its features include multiple inheritance
                 of attributes, isA hierarchies of transactions,
                 metaclasses, typed attributes, a procedural
                 exception-handling mechanism and an iteration construct
                 based on the abstraction mechanisms supported.
                 Developing a compiler for the language involved dealing
                 with the problems of efficiently representing and
                 accessing a large collection of entities, performing
                 (static) type checking and representing isA hierarchies
                 of transactions.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Performance; Theory",
  subject =      "Software --- Programming Languages --- Processors
                 (D.3.4): {\bf Compilers}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Software --- Programming Languages ---
                 Language Classifications (D.3.2): {\bf TAXIS}",
}

@InProceedings{Wang:1987:PAM,
  author =       "C. P. Wang and V. O. K. Li",
  title =        "The precedence-assignment model for distributed
                 databases concurrency control algorithms",
  crossref =     "ACM:1987:PPS",
  pages =        "119--128",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p119-wang/p119-wang.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p119-wang/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p119-wang/",
  abstract =     "We have developed a unified model, called the
                 precedence-assignment model (PAM), of concurrency
                 control algorithms in distributed database. It is shown
                 that two-phase locking timestamp-ordering and other
                 existing concurrency control algorithms may be modeled
                 by PAM. We have also developed a new concurrency
                 control algorithm under the PAM modeling framework,
                 which is free from deadlocks and transaction restarts.
                 Finally, a unified concurrency control subsystem for
                 precedence-assignment algorithms is developed. By using
                 this subsystem, different transactions may be executed
                 under different concurrency control algorithms
                 simultaneously.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Management; Theory; Verification",
  keywords =     "algorithms; design; management; theory; verification",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Distributed databases}",
}

@InProceedings{Hadzilacos:1987:KTA,
  author =       "V. Hadzilacos",
  title =        "A knowledge-theoretic analysis of atomic commitment
                 protocols",
  crossref =     "ACM:1987:PPS",
  pages =        "129--134",
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p129-hadzilacos/p129-hadzilacos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p129-hadzilacos/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p129-hadzilacos/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; management; standardization; theory;
                 verification",
  subject =      "{\bf C.2.2} Computer Systems Organization,
                 COMPUTER-COMMUNICATION NETWORKS, Network Protocols.
                 {\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Distributed databases. {\bf G.m} Mathematics
                 of Computing, MISCELLANEOUS.",
}

@InProceedings{Lyngbaek:1987:MSD,
  author =       "Peter Lyngbaek and Victor Vianu",
  title =        "Mapping a semantic database model to the relational
                 model",
  crossref =     "Dayal:1987:PAC",
  pages =        "132--142",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p132-lyngbaek/p132-lyngbaek.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p132-lyngbaek/",
  abstract =     "The connection between semantic database models and
                 the relational model is formally investigated using the
                 Iris Data Model, which has been implemented using
                 relational database techniques. The results focus on
                 properties of relational schemas that are translations
                 of Iris schemas. Two new types of constraints,
                 cross-product constraints and multiplicity constraints
                 are introduced to characterize the relational
                 translations of Iris schemas. The connection
                 established between Iris and relational schemas also
                 yields new, unexpected information about Iris schemas.
                 In particular, a notion of equivalence of Iris schemas
                 is defined using their relational translations, and a
                 result is obtained on simplifying the type structure of
                 Iris schemas.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Theory",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2): {\bf IRIS}",
}

@InProceedings{Minker:1987:PDD,
  author =       "J. Minker",
  title =        "Perspectives in deductive databases {(Abstract
                 only)}",
  crossref =     "ACM:1987:PPS",
  pages =        "135--136 (or 135--135??)",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p135-minker/p135-minker.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p135-minker/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p135-minker/",
  abstract =     "I will discuss my experiences, some of the work that I
                 have done and related work that influenced me,
                 concerning deductive databases over the last 30 years.
                 It will be convenient to divide this time period into
                 roughly three equal parts, 1957 - 1968, 1969 - 1978,
                 1979 - present. For the first portion I will describe
                 how my interest started in deductive databases in 1957,
                 at a time when not even the field of databases existed.
                 I will describe work in the beginning years, leading to
                 the start of deductive databases in about 1968 with the
                 work of Cordell Green and Bertram Raphael. \par

                 The second period saw a great deal of work in theorem
                 proving as well as the introduction of logic
                 programming. The existence and importance of deductive
                 databases as a formal and viable discipline received
                 its impetus at a workshop held in Toulouse, France, in
                 1977, which culminated in the book, Logic and Data
                 Bases. The relationship of deductive databases and
                 logic programming was recognized at that time. During
                 the third and most recent period we have seen formal
                 theories of databases come about as an outgrowth of
                 that work, and the recognition that artificial
                 intelligence and deductive databases are closely
                 related, at least through the so-called expert database
                 systems. I expect that the relationships between
                 techniques from formal logic, databases, logic
                 programming, and artificial intelligence will continue
                 to be explored and the field of deductive databases
                 will become a more prominent area of computer science
                 in coming years.",
  acknowledgement = ack-nhfb,
  generalterms = "Management",
  keywords =     "management",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf I.2.1} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Applications and Expert
                 Systems.",
}

@InProceedings{Apt:1987:MSD,
  author =       "K. Apt and J. M. Pugin",
  title =        "Maintenance of stratified databases viewed as a belief
                 revision system",
  crossref =     "ACM:1987:PPS",
  pages =        "136--145",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p136-apt/p136-apt.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p136-apt/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p136-apt/",
  abstract =     "We study here declarative and dynamic aspects of
                 non-monotonic reasoning in the context of deductive
                 databases. More precisely, we consider here maintenance
                 of a special class of indefinite deductive databases,
                 called stratified databases, introduced in Apt, Blair
                 and Walker [ABW] and Van Gelder [VG] in which recursion
                 ``through'' negation is disallowed. \par

                 A stratified database has a natural model associated
                 with it which is selected as its intended meaning. The
                 maintenance problem for these databases is complicated
                 because insertions can lead to deletions and vice
                 versa. \par

                 To solve this problem we make use of the ideas present
                 in the works of Doyle [D] and de Kleer [dK] on belief
                 revision systems. We offer here a number of solutions
                 which differ in the amount of static and dynamic
                 information used and the form of support introduced. We
                 also discuss the implementation issues and the
                 trade-offs involved.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management; Theory",
  keywords =     "design; management; theory",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf I.2.3} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Deduction and Theorem Proving,
                 Nonmonotonic reasoning and belief revision.",
}

@InProceedings{Roth:1987:DRD,
  author =       "Mark A. Roth and Henry F. Korth",
  title =        "The design of {$\lnot 1$NF} relational databases into
                 nested normal form",
  crossref =     "Dayal:1987:PAC",
  pages =        "143--159",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p143-roth/p143-roth.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p143-roth/",
  abstract =     "We develop new algorithms for the design of non first
                 normal form relational databases that are in nested
                 normal form. Previously, a set of given multivalued
                 dependencies and those multivalued dependencies implied
                 by given functional dependencies were used to obtain a
                 nested normal form decomposition of a scheme. This
                 method ignored the semantic distinction between
                 functional and multivalued dependencies and utilized
                 only full multivalued dependencies in the design
                 process. We propose new algorithms which take advantage
                 of this distinction, and use embedded multivalued
                 dependencies to enhance the decomposition. This results
                 in further elimination of redundancy due to functional
                 dependencies in nested normal form designs.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Normal forms}",
}

@InProceedings{Hegner:1987:SIP,
  author =       "S. Hegner",
  title =        "Specification and implementation of programs for
                 updating incomplete information databases",
  crossref =     "ACM:1987:PPS",
  pages =        "146--158",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p146-hegner/p146-hegner.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p146-hegner/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p146-hegner/",
  abstract =     "The problem of updating incomplete information
                 databases is examined as a programming problem. From
                 this point of view formal denotational semantics are
                 developed for two applicative programming languages,
                 BLU and HLU. BLU is a very simple language with only
                 five primitives, and is designed primarily as a tool
                 for the implementation of higher level languages. The
                 semantics of BLU are formally developed at two levels,
                 possible worlds and clausal, and the latter is shown to
                 be a correct implementation of the former. HLU is a
                 user level update language. It is defined entirely in
                 terms of BLU, and so immediately inherits its semantic
                 definition from that language. This demonstrates a
                 level of completeness for BLU as a level of primitives
                 for update language implementation. The necessity of a
                 particular BLU primitive, {\em masking}, suggests that
                 there is a high degree of inherent complexity in
                 updating logical databases.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages; Management; Theory",
  keywords =     "algorithms; languages; management; theory",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf D.3.2} Software, PROGRAMMING
                 LANGUAGES, Language Classifications. {\bf F.3.2} Theory
                 of Computation, LOGICS AND MEANINGS OF PROGRAMS,
                 Semantics of Programming Languages, Denotational
                 semantics. {\bf H.2.1} Information Systems, DATABASE
                 MANAGEMENT, Logical Design, Schema and subschema.",
}

@InProceedings{Biliris:1987:OSL,
  author =       "A. Biliris",
  title =        "Operation specific locking in {B}-trees",
  crossref =     "ACM:1987:PPS",
  pages =        "159--169",
  month =        mar,
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "Database/Wiederhold.bib;
                 http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p159-biliris/p159-biliris.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p159-biliris/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p159-biliris/",
  abstract =     "B-trees have been used as an access aid for both
                 primary and secondary indexing for quite some time.
                 This paper presents a deadlock free locking mechanism
                 in which different processes make use of different lock
                 types in order to reach the leaf nodes. The
                 compatibility relations among locks on a node, do not
                 exclusively depend on their type, but also on the node
                 status and the number and kind of processes acting
                 currently on the node. As a result, a number of
                 insertion or deletion processes can operate
                 concurrently on a node. The paper presents an
                 appropriate recovery strategy in case of failure, and
                 discusses the protocol modifications that are required
                 so it can be used in other similar structures such as
                 B+-trees, compressed B-trees, and R-trees for spatial
                 searching.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management; Standardization; Theory",
  keywords =     "design; management; standardization; theory",
  subject =      "{\bf H.2.2} Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Deadlock avoidance. {\bf G.2.2}
                 Mathematics of Computing, DISCRETE MATHEMATICS, Graph
                 Theory, Trees. {\bf I.2.8} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Problem Solving, Control
                 Methods, and Search, Graph and tree search
                 strategies.",
}

@InProceedings{Graefe:1987:EOG,
  author =       "Goetz Graefe and David J. DeWitt",
  title =        "The {EXODUS} optimizer generator",
  crossref =     "Dayal:1987:PAC",
  pages =        "160--172",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p160-graefe/p160-graefe.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p160-graefe/",
  abstract =     "This paper presents the design and an initial
                 performance evaluation of the query optimizer generator
                 designed for the EXODUS extensible database system.
                 Algebraic transformation rules are translated into an
                 executable query optimizer, which transforms query
                 trees and selects methods for executing operations
                 according to cost functions associated with the
                 methods. The search strategy avoids exhaustive search
                 and it modifies itself to take advantage of past
                 experience. Computational results show that an
                 optimizer generated for a relational system produces
                 access plans almost as good as those produced by
                 exhaustive search, with the search time cut to a small
                 fraction.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Query formulation}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Software --- Programming Languages ---
                 Language Classifications (D.3.2): {\bf EXODUS}",
}

@InProceedings{Nurmi:1987:CCD,
  author =       {O. Nurmi and E. Soisalon-Soininen and D. Wood},
  title =        {Concurrency Control in Database Structures with
                  Relaxed Balance},
  crossref =     {ACM:1987:PPS},
  pages =        {170--176},
  month =        mar,
  year =         {1987},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {Database/Wiederhold.bib;
                  http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/28659/p170-nurmi/p170-nurmi.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/28659/p170-nurmi/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p170-nurmi/},
  abstract =     {We consider the separation of rebalancing from updates
                  in several database structures, such as B-trees for
                  external and AVL-trees for internal structures. We show
                  how this separation can be implemented such that
                  rebalancing is performed by local background processes.
                  Our solution implies that even simple locking schemes
                  (without additional links and copies of certain nodes)
                  for concurrency control are efficient in the sense that
                  at any time only a small constant number of nodes must
                  be locked.},
  acknowledgement = ack-nhfb,
  annote =       {temporary layer block is inserted in Btree so split
                  does not propagate up. Cleanup as in Sagiv,Y. 86. Can
                  solve variable-length entry problem.},
  generalterms = {Design; Management; Theory},
  keywords =     {design; management; theory},
  subject =      {{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                  Systems, Concurrency. {\bf I.2.8} Computing
                  Methodologies, ARTIFICIAL INTELLIGENCE, Problem
                  Solving, Control Methods, and Search, Graph and tree
                  search strategies. {\bf G.2.2} Mathematics of
                  Computing, DISCRETE MATHEMATICS, Graph Theory, Trees.},
}

@InProceedings{Freytag:1987:RBV,
  author =       "Johann Christoph Freytag",
  title =        "A rule-based view of query optimization",
  crossref =     "Dayal:1987:PAC",
  pages =        "173--180",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p173-freytag/p173-freytag.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p173-freytag/",
  abstract =     "The query optimizer is an important system component
                 of a relational database management system (DBMS). It
                 is the responsibility of this component to translate
                 the user-submitted query---usually written in a
                 non-procedural language---into an efficient query
                 evaluation plan (QEP) which is then executed against
                 the database. The research literature describes a wide
                 variety of optimization strategies for different query
                 languages and implementation environments. However,
                 very little is known about how to design and structure
                 the query optimization component to implement these
                 strategies. \par

                 This paper proposes a first step towards the design of
                 a {\em modular query optimizer}. We describe its
                 operations by {\em transformation rules\/} which
                 generate different QEPs from initial query
                 specifications. As we distinguish different aspects of
                 the query optimization process, our hope is that the
                 approach taken in this paper will contribute to the
                 more general goal of a modular query optimizer as part
                 of an extensible database management system.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Query formulation}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Information Systems --- Database
                 Management --- Systems (H.2.4); Theory of Computation
                 --- Mathematical Logic and Formal Languages ---
                 Grammars and Other Rewriting Systems (F.4.2)",
}

@InProceedings{Sun:1987:PRM,
  author =       "R. Sun and G. Thomas",
  title =        "Performance results on multiversion timestamp
                 concurrency control with predeclared writesets",
  crossref =     "ACM:1987:PPS",
  pages =        "177--184",
  month =        mar,
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p177-sun/p177-sun.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p177-sun/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p177-sun/",
  acknowledgement = ack-nhfb,
  keywords =     "management; measurement; performance;
                 standardization",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Concurrency. {\bf C.4} Computer Systems
                 Organization, PERFORMANCE OF SYSTEMS, Modeling
                 techniques.",
}

@InProceedings{Shenoy:1987:SSQ,
  author =       {Sreekumar T. Shenoy and Z. Meral Ozsoyoglu},
  title =        {A system for semantic query optimization},
  crossref =     {Dayal:1987:PAC},
  pages =        {181--195},
  year =         {1987},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/38713/p181-shenoy/p181-shenoy.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/38713/p181-shenoy/},
  abstract =     {This paper describes a scheme to utilize semantic
                  integrity constraints in optimizing a user specified
                  query. The scheme uses a graph theoretic approach to
                  identify redundant join clauses and redundant
                  restriction clauses specified in a user query. An
                  algorithm is suggested to eliminate such redundant
                  joins and avoid unnecessary restrictions. In addition
                  to these eliminations, the algorithm aims to introduce
                  as many restrictions on indexed attributes as possible,
                  thus yielding an equivalent, but potentially more
                  profitable, form of the original query.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Languages; Performance},
  subject =      {Information Systems --- Information Storage and
                  Retrieval --- Information Search and Retrieval (H.3.3):
                  {\bf Query formulation}; Information Systems ---
                  Database Management --- Logical Design (H.2.1): {\bf
                  Data models}},
}

@InProceedings{Dechter:1987:DAR,
  author =       {R. Dechter},
  title =        {Decomposing an {$N$-ary} Relation into a Tree of
                  Binary Relations},
  crossref =     {ACM:1987:PPS},
  pages =        {185--189},
  month =        mar,
  year =         {1987},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {Database/Wiederhold.bib;
                  http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  http://www.acm.org/pubs/toc/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/28659/p185-dechter/p185-dechter.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/28659/p185-dechter/},
  abstract =     {We present an efficient algorithm for decomposing an
                  $n$-ary relation into a tree of binary relations, and
                  provide an efficient test for checking whether or not
                  the tree formed represents the relation. If there
                  exists a tree-decomposition, the algorithm is
                  guaranteed to find one, otherwise, the tree generated
                  will fail the test, then indicating that no tree
                  decomposition exist. The unique features of the
                  algorithm presented in this paper, is that it does not
                  a priori assume any dependencies in the initial
                  relation, rather it derives such dependencies from the
                  bare relation instance.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Management; Theory; Verification},
  keywords =     {algorithms; management; theory; verification},
  subject =      {{\bf G.2.2} Mathematics of Computing, DISCRETE
                  MATHEMATICS, Graph Theory, Trees. {\bf I.2.8} Computing
                  Methodologies, ARTIFICIAL INTELLIGENCE, Problem
                  Solving, Control Methods, and Search, Graph and tree
                  search strategies.},
}

@InProceedings{Delgrande:1987:FLA,
  author =       "J. P. Delgrande",
  title =        "Formal limits on the automatic generation and
                 maintenance of integrity constraints",
  crossref =     "ACM:1987:PPS",
  pages =        "190--196",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p190-delgrande/p190-delgrande.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p190-delgrande/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p190-delgrande/",
  abstract =     "A formal approach to the automatic generation and
                 maintenance of integrity constraints in relational
                 databases is presented. It is assumed that some portion
                 of the database extension is known and that constraints
                 are to be formed on the basis of this portion. Since
                 this portion may be updated or new relations added to
                 the database the set of hypothesised constraints may
                 require occasional revision. The goal in this paper is
                 to characterise those constraints that may potentially
                 be formed on the basis of a part of the extension.
                 Formal systems are derived by means of which the set of
                 constraints that can be formed is precisely specified.
                 A procedure is derived for restoring the consistency of
                 a set of constraints after conflicting tuples are
                 encountered. It is shown that the set of constraints to
                 which the procedure may be applied corresponds with
                 minor limitations to the sentences of relational
                 algebra.",
  acknowledgement = ack-nhfb,
  generalterms = "Management; Theory; Verification",
  keywords =     "management; theory; verification",
  subject =      "{\bf H.2.0} Information Systems, DATABASE MANAGEMENT,
                 General, Security, integrity, and protection**. {\bf
                 H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf G.2.m} Mathematics of Computing,
                 DISCRETE MATHEMATICS, Miscellaneous. {\bf H.2.1}
                 Information Systems, DATABASE MANAGEMENT, Logical
                 Design, Schema and subschema.",
}

@InProceedings{Paul:1987:AID,
  author =       "H. B. Paul and H. J. Schek and M. H. Scholl",
  title =        "Architecture and implementation of the {Darmstadt}
                 database kernel system",
  crossref =     "Dayal:1987:PAC",
  pages =        "196--207",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p196-paul/p196-paul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p196-paul/",
  abstract =     "The multi-layered architecture of the DArmStadt Data
                 Base System (DASDBS) for advanced applications is
                 introduced. DASDBS is conceived as a family of
                 application-specific database systems on top of a
                 common database kernel system. The main design problem
                 considered here is, What features are common enough to
                 be integrated into the kernel and what features are
                 rather application-specific? Kernel features must be
                 simple enough to be efficiently implemented and to
                 serve a broad class of clients, yet powerful enough to
                 form a convenient basis for application-oriented
                 layers. Our kernel provides mechanisms to efficiently
                 store hierarchically structured complex objects, and
                 offers operations which are set-oriented and can be
                 processed in a single scan through the objects. To
                 achieve high concurrency in a layered system, a
                 multi-level transaction methodology is applied. First
                 experiences with our current implementation and some
                 lessons we have learned from it are reported.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf DASDBS}; Software --- Software
                 Engineering --- Distribution, Maintenance, and
                 Enhancement (D.2.7): {\bf Extensibility**}",
}

@InProceedings{Imielinski:1987:RKD,
  author =       "T. Imielinski",
  title =        "Relative knowledge in a distributed database",
  crossref =     "ACM:1987:PPS",
  pages =        "197--209",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p197-imielinski/p197-imielinski.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p197-imielinski/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p197-imielinski/",
  abstract =     "Let DB be a database and let $u_1, \ldots, u_m$ be a
                 collection of users each having at his or her disposal
                 a query sublanguage $L_{u_i}$ generated by some view
                 predicate. Each of these users knows only as much as he
                 can learn from the database using his or her query
                 sublanguage. Such a knowledge is called {\em relative
                 knowledge\/} in the paper and its various properties
                 including the model and proof theory are investigated.
                 The applications of relative knowledge in the database
                 security and integrity are also discussed.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Management; Theory; Verification",
  keywords =     "languages; management; theory; verification",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Distributed databases. {\bf H.2.3} Information
                 Systems, DATABASE MANAGEMENT, Languages, Query
                 languages.",
}

@InProceedings{Richardson:1987:PCD,
  author =       "Joel E. Richardson and Michael J. Carey",
  title =        "Programming constructs for database system
                 implementation in {EXODUS}",
  crossref =     "Dayal:1987:PAC",
  pages =        "208--219",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p208-richardson/p208-richardson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p208-richardson/",
  abstract =     "The goal of the EXODUS extensible DBMS project is to
                 enable the rapid development of a wide spectrum of
                 high-performance, application-specific database systems.
                 EXODUS provides certain kernel facilities for use by
                 all applications and a set of tools to aid the database
                 implementor (DBI) in generating new database system
                 software. Some of the DBI's work is supported by EXODUS
                 tools which generate database components from a
                 specification. However, components such as new abstract
                 data types, access methods, and database operations
                 must be explicitly coded by the DBI. This paper
                 analyzes the major programming problems faced by the
                 DBI, describing the collection of programming language
                 constructs that EXODUS provides for simplifying the
                 DBI's task. These constructs have been embedded in the
                 E programming language, an extension of C++ designed
                 specifically for implementing DBMS software.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Performance",
  subject =      "Software --- Programming Languages --- Language
                 Classifications (D.3.2): {\bf EXODUS}; Information
                 Systems --- Database Management --- Systems (H.2.4);
                 Software --- Programming Languages --- Language
                 Constructs and Features (D.3.3); Software --- Software
                 Engineering --- Distribution, Maintenance, and
                 Enhancement (D.2.7): {\bf Extensibility**}",
}

@InProceedings{Afrati:1987:PCS,
  author =       {F. Afrati and C. Papadimitriou},
  title =        {The Parallel Complexity of Simple Chain Queries},
  crossref =     {ACM:1987:PPS},
  pages =        {210--213},
  month =        mar,
  year =         {1987},
  bibdate =      {Wed Oct 25 08:47:34 MDT 2000},
  bibsource =    {Database/Wiederhold.bib;
                  http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/28659/p210-afrati/p210-afrati.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/28659/p210-afrati/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p210-afrati/},
  acknowledgement = ack-nhfb,
  keywords =     {languages; management; theory; verification},
  subject =      {{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                  Systems, Query processing. {\bf F.1.2} Theory of
                  Computation, COMPUTATION BY ABSTRACT DEVICES, Modes of
                  Computation, Parallelism and concurrency. {\bf H.2.3}
                  Information Systems, DATABASE MANAGEMENT, Languages,
                  Datalog.},
}

@InProceedings{Beeri:1987:BPS,
  author =       "C. Beeri and P. Kanellakis and F. Bancilhon and R.
                 Ramakrishnan",
  title =        "Bounds on the propagation of selection into logic
                 programs",
  crossref =     "ACM:1987:PPS",
  pages =        "214--226",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p214-beeri/p214-beeri.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p214-beeri/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p214-beeri/",
  abstract =     "We consider the problem of propagating selections
                 (i.e., bindings of variables) into logic programs. In
                 particular, we study the class of binary chain programs
                 and define selection propagation as the task of finding
                 an equivalent program containing only unary derived
                 predicates. We associate a context free grammar {\em
                 L(H)\/} with every binary chain program {\em H}. We
                 show that, given {$H$}, propagating a selection
                 involving some constant is possible iff {\em L(H)\/} is
                 regular, and therefore undecidable. We also show that
                 propagating a selection of the form {\em p(X,X)\/} is
                 possible iff {\em L(H)\/} is finite, and therefore
                 decidable. We demonstrate the connection of these two
                 cases, respectively, with the weak monadic second order
                 theory of one successor and with monadic generalized
                 spectra. We further clarify the analogy between chain
                 programs and languages from the point of view of
                 program equivalence and selection propagation
                 heuristics.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Management; Theory; Verification",
  keywords =     "languages; management; theory; verification",
  subject =      "{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC
                 AND FORMAL LANGUAGES, Mathematical Logic, Logic and
                 constraint programming. {\bf I.2.3} Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Logic programming. {\bf F.4.2} Theory
                 of Computation, MATHEMATICAL LOGIC AND FORMAL
                 LANGUAGES, Grammars and Other Rewriting Systems,
                 Grammar types. {\bf H.2.3} Information Systems,
                 DATABASE MANAGEMENT, Languages, Query languages.",
}

@InProceedings{Lindsay:1987:DME,
  author =       "Bruce Lindsay and John McPherson and Hamid Pirahesh",
  title =        "A data management extension architecture",
  crossref =     "Dayal:1987:PAC",
  pages =        "220--226",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p220-lindsay/p220-lindsay.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p220-lindsay/",
  abstract =     "A database management system architecture is described
                 that facilitates the implementation of data management
                 extensions for relational database systems. The
                 architecture defines two classes of data management
                 extensions: alternative ways of storing relations called
                 relation ``storage methods'', and access paths,
                 integrity constraints, or triggers which are
                 ``attachments'' to relations. Generic sets of
                 operations are defined for storage methods and
                 attachments, and these operations must be provided in
                 order to add a new storage method or attachment type to
                 the system. The data management extension architecture
                 also provides common services for coordination of
                 storage method and attachment execution. This article
                 describes the data management extension architecture
                 along with some implementation issues and techniques.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4); Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}; Software --- Software Engineering ---
                 Distribution, Maintenance, and Enhancement (D.2.7):
                 {\bf Extensibility**}",
}

@InProceedings{Naughton:1987:DCB,
  author =       "J. F. Naughton and Y. Sagiv",
  title =        "A decidable class of bounded recursions",
  crossref =     "ACM:1987:PPS",
  pages =        "227--236",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p227-naughton/p227-naughton.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p227-naughton/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p227-naughton/",
  abstract =     "Detecting bounded recursions is a powerful
                 optimization technique for recursive database query
                 languages as bounded recursions can be replaced by
                 equivalent nonrecursive definitions. The problem is of
                 theoretical interest because by varying the class of
                 recursions considered one can generate instances that
                 vary from linearly decidable to NP-hard to undecidable.
                 In this paper we review and clarify the existing
                 definitions of boundedness. We then specify a sample
                 criterion that guarantees that the condition in
                 Naughton [7] is necessary and sufficient for
                 boundedness. The programs satisfying this criterion
                 subsume and extend previously known decidable classes
                 of bounded linear recursions.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Management; Theory; Verification",
  keywords =     "languages; management; theory; verification",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                 Languages, Query languages. {\bf F.3.3} Theory of
                 Computation, LOGICS AND MEANINGS OF PROGRAMS, Studies
                 of Program Constructs, Program and recursion schemes.
                 {\bf G.2.m} Mathematics of Computing, DISCRETE
                 MATHEMATICS, Miscellaneous.",
}

@InProceedings{Jajodia:1987:DV,
  author =       {Sushil Jajodia and David Mutchler},
  title =        {Dynamic voting},
  crossref =     {Dayal:1987:PAC},
  pages =        {227--238},
  year =         {1987},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/38713/p227-jajodia/p227-jajodia.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/38713/p227-jajodia/},
  abstract =     {In a voting-based algorithm, a replicated file can be
                  updated in a partition if it contains a majority of
                  copies. In this paper, we propose an extension of this
                  scheme which permits a file to be updated in a
                  partition provided it contains a majority of up-to-date
                  copies. Our scheme not only preserves mutual
                  consistency of the replicated file, but provides
                  improvement in its availability as well. We develop a
                  stochastic model which gives insight into the
                  improvements afforded by our scheme over the voting
                  scheme.},
  acknowledgement = ack-nhfb,
  generalterms = {Design; Performance},
  subject =      {Software --- Operating Systems --- File Systems
                  Management (D.4.3): {\bf Maintenance**}; Information
                  Systems --- Database Management --- Systems (H.2.4):
                  {\bf Distributed databases}; Information Systems ---
                  Database Management --- Systems (H.2.4): {\bf
                  Transaction processing}},
}

@InProceedings{Shmueli:1987:DEA,
  author =       "O. Shmueli",
  title =        "Decidability and expressiveness aspects of logic
                 queries",
  crossref =     "ACM:1987:PPS",
  pages =        "237--249",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p237-shmueli/p237-shmueli.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p237-shmueli/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p237-shmueli/",
  abstract =     "This paper addresses some basic problems regarding
                 logic programming based queries over relational
                 databases. We re-examine the query classes {$H$} and
                 {\em YE\/} + defined by Chandra and Harel [2] We define
                 {$H$} + and {\em YE\/} ++ which differ from {$H$} and
                 {\em YE\/} + in that the use of equality (=) and
                 inequality ($\neq$) is prohibited. We show that {$H$} + is
                 more expressive than {\em YE\/} ++ and that any {$H$} +
                 program can be transformed into an equivalent {$H$} +
                 program containing a single recursive predicate without
                 using the equality or inequality operators. As a
                 corollary we obtain a fixpoint formula characterization
                 of {$H$} + queries. \par

                 We consider the problems of determining containment,
                 equivalence, and satisfiability of logic based queries.
                 The containment and equivalence problems addressed here
                 extend the work of Aho, Sagiv and Ullman on relational
                 queries [1] and Papadimitriou on Prolog [10]. As
                 corollaries we show that determining safety and literal
                 redundancy are both undecidable problems.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Management; Theory; Verification",
  keywords =     "languages; management; theory; verification",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Logic and constraint programming. {\bf I.2.3}
                 Computing Methodologies, ARTIFICIAL INTELLIGENCE,
                 Deduction and Theorem Proving, Logic programming.",
}

@InProceedings{Haerder:1987:CTR,
  author =       "Theo Haerder and Kurt Rothermel",
  title =        "Concepts for transaction recovery in nested
                 transactions",
  crossref =     "Dayal:1987:PAC",
  pages =        "239--248",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p239-haerder/p239-haerder.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p239-haerder/",
  abstract =     "The concept of nested transactions offers more
                 decomposable execution units and finer grained control
                 over recovery and concurrency as compared to `flat'
                 transactions. To exploit these advantages, especially
                 transaction recovery has to be refined and adjusted to
                 the requirements of the control structure. \par

                 In this paper, we investigate transaction recovery for
                 nested transactions. Therefore, a model for nested
                 transaction is introduced allowing for synchronous and
                 asynchronous transaction invocation as well as single
                 call and conversational interfaces. For the resulting
                 four parameter combinations, the properties and
                 dependencies of transaction recovery are explored if a
                 transaction is `unit of recovery' and if savepoints
                 within transactions are used to gain finer recovery
                 units.",
  acknowledgement = ack-nhfb,
  generalterms = "Performance; Security; Theory",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}",
}

@InProceedings{Garcia-Molina:1987:S,
  author =       "Hector Garcia-Molina and Kenneth Salem",
  title =        "Sagas",
  crossref =     "Dayal:1987:PAC",
  pages =        "249--259",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p249-garcia-molina/p249-garcia-molina.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p249-garcia-molina/",
  abstract =     "Long lived transactions (LLTs) hold on to database
                 resources for relatively long periods of time,
                 significantly delaying the termination of shorter and
                 more common transactions. To alleviate these problems
                 we propose the notion of a saga. A LLT is a saga if it
                 can be written as a sequence of transactions that can
                 be interleaved with other transactions. The database
                 management system guarantees that either all the
                 transactions in a saga are successfully completed or
                 compensating transactions are run to amend a partial
                 execution. Both the concept of saga and its
                 implementation are relatively simple, but they have the
                 potential to improve performance significantly. We
                 analyze the various implementation issues related to
                 sagas, including how they can be run on an existing
                 system that does not directly support them. We also
                 discuss techniques for database and LLT design that
                 make it feasible to break up LLTs into sagas.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2)",
}

@InProceedings{Selinger:1987:CEI,
  author =       "P. Selinger",
  title =        "Chickens and eggs --- the interrelationship of systems
                 and theory",
  crossref =     "ACM:1987:PPS",
  pages =        "250--253",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p250-selinger/p250-selinger.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p250-selinger/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p250-selinger/",
  abstract =     "This paper describes a personal perspective of the
                 kinds of contributions that systems research and
                 theoretical research make to one another particularly
                 in the database area. Examples of each kind of
                 contribution are given, and then several case studies
                  from the author's personal experience are presented.
                 The case studies illustrate database systems research
                 where theoretical work contributed to systems results
                 and vice versa. Areas of database systems which need
                 more contributions from the theoretical community will
                 also be presented.",
  acknowledgement = ack-nhfb,
  generalterms = "Management; Theory",
  keywords =     "management; theory",
  subject =      "{\bf H.1.1} Information Systems, MODELS AND
                 PRINCIPLES, Systems and Information Theory.",
}

@InProceedings{Karabeg:1987:ASR,
  author =       "A. Karabeg and D. Karabeg and K. Papakonstantinou and
                 V. Vianu",
  title =        "Axiomatization and simplification rules for relational
                 transactions",
  crossref =     "ACM:1987:PPS",
  pages =        "254--259",
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p254-karabeg/p254-karabeg.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p254-karabeg/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p254-karabeg/",
  acknowledgement = ack-nhfb,
  keywords =     "management; theory; verification",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf H.2.4} Information Systems,
                 DATABASE MANAGEMENT, Systems, Transaction processing.
                 {\bf G.2.m} Mathematics of Computing, DISCRETE
                 MATHEMATICS, Miscellaneous.",
}

@InProceedings{Abiteboul:1987:TLC,
  author =       "S. Abiteboul and V. Vianu",
  title =        "A translation language complete for database update
                 and specification",
  crossref =     "ACM:1987:PPS",
  pages =        "260--268",
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p260-abiteboul/p260-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p260-abiteboul/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p260-abiteboul/",
  acknowledgement = ack-nhfb,
  keywords =     "design; languages; management",
  subject =      "{\bf D.3.2} Software, PROGRAMMING LANGUAGES, Language
                 Classifications, TL.",
}

@InProceedings{Freeston:1987:BFN,
  author =       "Michael Freeston",
  title =        "The {BANG} file: a new kind of grid file",
  crossref =     "Dayal:1987:PAC",
  pages =        "260--269",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p260-freeston/p260-freeston.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p260-freeston/",
  abstract =     "A new multi-dimensional file structure has been
                 developed in the course of a project to devise ways of
                 improving the support for interactive queries to
                 database and knowledge bases. Christened the `BANG'
                  file --- a Balanced And Nested Grid --- the new structure
                 is of the `grid file' type, but is fundamentally
                 different from previous grid file designs in that it
                 does not share their common underlying properties. It
                 has a tree-structured directory which has the
                 self-balancing property of a B-tree and which, in
                 contrast to previous designs, always expands at the
                 same rate as the data, whatever the form of the data
                 distribution. Its partitioning strategy both accurately
                 reflects the clustering of points in the data space,
                 and is flexible enough to adapt gracefully to changes
                 in the distribution.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Data --- Files (E.5): {\bf Organization/structure};
                 Data --- Data Structures (E.1): {\bf Trees};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}",
}

@InProceedings{Beeri:1987:PM,
  author =       "C. Beeri and R. Ramakrishnan",
  title =        "On the power of magic",
  crossref =     "ACM:1987:PPS",
  pages =        "269--284",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p269-beeri/p269-beeri.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p269-beeri/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p269-beeri/",
  abstract =     "This paper considers the efficient evaluation of
                 recursive queries expressed using Horn Clauses. We
                 define {\em sideways information passing\/} formally
                 and show how a query evaluation algorithm may be
                 defined in terms of sideways information passing and
                 control. We then consider a class of information
                 passing strategies which suffices to describe most
                 query evaluation algorithms in the database literature,
                 and show that these strategies may always be
                 implemented by rewriting a given program and evaluating
                 the rewritten program bottom-up. We describe in detail
                 several algorithms for rewriting a program. These
                 algorithms generalize the Counting and Magic Sets
                 algorithms to work with arbitrary programs. Safety and
                 optimality of the algorithms are also considered.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Management; Performance; Theory;
                 Verification",
  keywords =     "algorithms; management; performance; theory;
                 verification",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf F.3.3} Theory of
                 Computation, LOGICS AND MEANINGS OF PROGRAMS, Studies
                 of Program Constructs, Program and recursion schemes.
                 {\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC
                 AND FORMAL LANGUAGES, Mathematical Logic, Logic and
                 constraint programming.",
}

@InProceedings{Nelson:1987:PAH,
  author =       "Randal C. Nelson and Hanan Samet",
  title =        "A population analysis for hierarchical data
                 structures",
  crossref =     "Dayal:1987:PAC",
  pages =        "270--277",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p270-nelson/p270-nelson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p270-nelson/",
  abstract =     "A new method termed population analysis is presented
                 for approximating the distribution of node occupancies
                 in hierarchical data structures which store a variable
                 number of geometric data items per node. The basic idea
                 is to describe a dynamic data structure as a set of
                 populations which are permitted to transform into one
                 another according to certain rules. The transformation
                 rules are used to obtain a set of equations describing
                 a population distribution which is stable under
                 insertion of additional information into the structure.
                 These equations can then be solved, either analytically
                 or numerically, to obtain the population distribution.
                 Hierarchical data structures are modeled by letting
                 each population represent the nodes of a given
                 occupancy. A detailed analysis of quadtree data
                 structures for storing point data is presented, and the
                 results are compared to experimental data. Two
                 phenomena referred to as {\em aging\/} and {\em
                 phasing\/} are defined and shown to account for the
                 differences between the experimental results and those
                 predicted by the model. The population technique is
                 compared with statistical methods of analyzing similar
                 data structures.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Data --- Data Structures (E.1): {\bf Trees}; Data ---
                 Files (E.5): {\bf Organization/structure}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}",
}

@InProceedings{Sellis:1987:ESP,
  author =       "Timos K. Sellis",
  title =        "Efficiently supporting procedures in relational
                 database systems",
  crossref =     "Dayal:1987:PAC",
  pages =        "278--291",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p278-sellis/p278-sellis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p278-sellis/",
  abstract =     "We examine an extended relational database system
                 which supports database procedures as full fledged
                 objects. In particular, we focus on the problems of
                 query processing and efficient support for database
                 procedures. First, a variation to the original INGRES
                 decomposition algorithm is presented. Then, we examine
                 the idea of storing results of previously processed
                 procedures in secondary storage ({\em caching\/}).
                 Using a cache, the cost of processing a query can be
                 reduced by preventing multiple evaluations of the same
                 procedure. Problems associated with cache
                 organizations, such as replacement policies and
                 validation schemes are examined. Another means for
                 reducing the execution cost of queries is indexing. A
                 new indexing scheme for cached results, Partial
                 Indexing, is proposed and analyzed.",
  acknowledgement = ack-nhfb,
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}",
}

@InProceedings{Grahne:1987:EES,
  author =       "G. Grahne and S. Sippu and E. Soisalon-Soininen",
  title =        "Efficient evaluation for a subset of recursive
                 queries",
  crossref =     "ACM:1987:PPS",
  pages =        "284--293",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p284-grahne/p284-grahne.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p284-grahne/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p284-grahne/",
  abstract =     "Well-known results on graph traversal are used to
                 develop a practical, efficient algorithm for evaluating
                 regularly and linearly recursive queries in databases
                 that contain only binary relations. Transformations are
                 given that reduce a subset of regular and linear
                  queries involving $n$-ary relations ($n \geq 2$) to queries
                 involving only binary relations.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Management; Measurement; Performance;
                 Theory",
  keywords =     "algorithms; management; measurement; performance;
                 theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf F.3.3} Theory of
                 Computation, LOGICS AND MEANINGS OF PROGRAMS, Studies
                 of Program Constructs, Program and recursion schemes.
                 {\bf G.2.m} Mathematics of Computing, DISCRETE
                 MATHEMATICS, Miscellaneous.",
  xxauthor =     "G. Grahne and S. Siu and E. Soisalon-Soininen",
}

@InProceedings{Hardwick:1987:WRF,
  author =       "Martin Hardwick",
  title =        "Why {ROSE} is fast: {Five} optimizations in the design
                 of an experimental database system for {CAD\slash CAM}
                 applications",
  crossref =     "Dayal:1987:PAC",
  pages =        "292--298",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p292-hardwick/p292-hardwick.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p292-hardwick/",
  abstract =     "ROSE is an experimental database system for CAD/CAM
                 applications that organizes a database into entries and
                 relationships. The data model of ROSE is an extension
                 of the relational model and the data manipulation
                 language is an extension of the relational algebra.
                 Internally, ROSE is organized so that it can use
                 operating system services to implement database system
                 services. In this paper we describe five optimizations
                 that have helped to make ROSE a fast database system
                 for CAD/CAM.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Experimentation; Performance",
  subject =      "Computer Applications --- Computer-Aided Engineering
                 (J.6); Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf ROSE}",
}

@InProceedings{Marchetti-Spaccamella:1987:WCC,
  author =       "A. Marchetti-Spaccamella and A. Pelaggi and D. Sacca",
  title =        "Worst-case complexity analysis of methods for logic
                 query implementation",
  crossref =     "ACM:1987:PPS",
  pages =        "294--301",
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p294-marchetti-spaccamella/p294-marchetti-spaccamella.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p294-marchetti-spaccamella/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p294-marchetti-spaccamella/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; management; theory",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf H.2.4} Information Systems,
                 DATABASE MANAGEMENT, Systems, Query processing. {\bf
                 G.2.m} Mathematics of Computing, DISCRETE MATHEMATICS,
                 Miscellaneous.",
}

@InProceedings{Kemper:1987:OOS,
  author =       "Alfons Kemper and Peter C. Lockemann and Mechtild
                 Wallrath",
  title =        "An object-oriented system for engineering
                 applications",
  crossref =     "Dayal:1987:PAC",
  pages =        "299--310",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p299-kemper/p299-kemper.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p299-kemper/",
  abstract =     "One of the most promising approaches to database
                 support of engineering applications is the concept of
                 object-oriented database management. Object-orientation
                 is usually approached from either a behavioral or
                 structural viewpoint. The former emphasizes the
                 application-specific manipulation of technical objects
                 while hiding their structural details whereas the
                 latter concentrates on the structural aspects and their
                 efficient implementation. The thesis of the paper is
                 that the two viewpoints may enter into a fruitful
                 symbiosis where a behaviorally object-oriented system
                 is implemented on top of a structurally object-oriented
                 database system, thereby combining ease of use by the
                 engineer with high database system performance. The
                 thesis will be demonstrated in the paper by a
                 user-friendly interface based on user-definable
                 abstract datatypes and its implementation using a
                  prototype for the non-first-normal-form (NF$^2$)
                 relational model, and will be supported by an
                 engineering example application from off-line robot
                 programming.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Computer
                 Applications --- Physical Sciences and Engineering
                 (J.2): {\bf Engineering}; Information Systems ---
                 Database Management --- Systems (H.2.4); Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2)",
}

@InProceedings{VanGucht:1987:EPE,
  author =       "D. {Van Gucht}",
  title =        "On the expressive power of the extended relational
                 algebra for the unnormalized relational model",
  crossref =     "ACM:1987:PPS",
  pages =        "302--312",
  year =         "1987",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p302-van_gucht/p302-van_gucht.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p302-van_gucht/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p302-van_gucht/",
  acknowledgement = ack-nhfb,
  keywords =     "design; management; theory; verification",
  subject =      "{\bf H.2.m} Information Systems, DATABASE MANAGEMENT,
                 Miscellaneous. {\bf H.2.1} Information Systems,
                 DATABASE MANAGEMENT, Logical Design.",
}

@InProceedings{Banerjee:1987:SIS,
  author =       "Jay Banerjee and Won Kim and Hyoung-Joo Kim and Henry
                 F. Korth",
  title =        "Semantics and implementation of schema evolution in
                 object-oriented databases",
  crossref =     "Dayal:1987:PAC",
  pages =        "311--322",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p311-banerjee/p311-banerjee.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p311-banerjee/",
  abstract =     "Object-oriented programming is well-suited to such
                 data-intensive application domains as CAD/CAM, AI, and
                 OIS (office information systems) with multimedia
                 documents. At MCC we have built a prototype
                 object-oriented database system, called ORION. It adds
                 persistence and sharability to objects created and
                 manipulated in applications implemented in an
                 object-oriented programming environment. One of the
                 important requirements of these applications is schema
                 evolution, that is, the ability to dynamically make a
                 wide variety of changes to the database schema. In this
                 paper, following a brief review of the object-oriented
                 data model that we support in ORION, we establish a
                 framework for supporting schema evolution, define the
                 semantics of schema evolution, and discuss its
                 implementation.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Performance",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Schema and subschema};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems
                 --- Information Storage and Retrieval --- Systems and
                 Software (H.3.4): {\bf ORION}",
}

@InProceedings{VanGelder:1987:SCT,
  author =       "A. {Van Gelder} and R. Topor",
  title =        "Safety and correct translation of relational calculus
                 formulas",
  crossref =     "ACM:1987:PPS",
  pages =        "313--327",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p313-van_gelder/p313-van_gelder.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p313-van_gelder/",
  abstract =     "Not all queries in relational calculus can be answered
                 ``sensibly'' once disjunction, negation, and universal
                 quantification are allowed. The class of relational
                 calculus queries, or formulas, that have ``sensible''
                 answers is called the {\em domain independent\/} class,
                 which is known to be undecidable. Subsequent research
                 has focused on identifying large decidable subclasses
                  of domain independent formulas. In this paper we
                  investigate the properties of two such classes: the {\em
                 evaluable\/} formulas and the {\em allowed\/} formulas.
                 Although both classes have been defined before, we give
                 simplified definitions, present short proofs of their
                  main properties, and describe a method to incorporate
                 equality. \par

                 Although evaluable queries have sensible answers, it is
                 not straightforward to compute them efficiently or
                 correctly. We introduce {\em relational algebra normal
                 form\/} for formulas from which form the correct
                 translation into relational algebra is trivial. We give
                 algorithms to transform an evaluable formula into an
                 equivalent {\em allowed\/} formula, and from there into
                 relational algebra normal form. Our algorithms avoid
                 use of the so-called {\em Dom\/} relation, consisting
                 of all constants appearing in the database or the
                 query. \par

                 Finally, we describe a restriction under which every
                 domain independent formula is evaluable, and argue that
                 evaluable formulas may be the largest decidable
                 subclass of the domain independent formulas that can be
                 efficiently recognized.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Management; Performance; Theory;
                 Verification",
  keywords =     "algorithms; management; performance; theory;
                 verification",
  subject =      "Information Systems --- Database Management ---
                 Miscellaneous (H.2.m); Computing Methodologies ---
                 Artificial Intelligence --- Deduction and Theorem
                 Proving (I.2.3): {\bf Deduction}",
}

@InProceedings{Cruz:1987:GQL,
  author =       "Isabel F. Cruz and Alberto O. Mendelzon and Peter T.
                 Wood",
  title =        "A graphical query language supporting recursion",
  crossref =     "Dayal:1987:PAC",
  pages =        "323--330",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p323-cruz/p323-cruz.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p323-cruz/",
  abstract =     "We define a language G for querying data represented
                 as a labeled graph {\em G}. By considering {$G$} as a
                 relation, this graphical query language can be viewed
                 as a relational query language, and its expressive
                 power can be compared to that of other relational query
                 languages. We do not propose G as an alternative to
                 general purpose relational query languages, but rather
                 as a complementary language in which recursive queries
                 are simple to formulate. The user is aided in this
                 formulation by means of a graphical interface. The
                 provision of regular expressions in G allows recursive
                 queries more general than transitive closure to be
                 posed, although the language is not as powerful as
                 those based on function-free Horn clauses. However, we
                 hope to be able to exploit well-known graph algorithms
                 in evaluating recursive queries efficiently, a topic
                 which has received widespread attention recently.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Theory",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3); Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}; Mathematics of Computing --- Discrete
                 Mathematics --- Graph Theory (G.2.2): {\bf Path and
                 circuit problems}; Theory of Computation ---
                 Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Recursive function
                 theory}",
}

@InProceedings{Ramakrishnan:1987:SRH,
  author =       "R. Ramakrishnan and F. Bancilhon and A. Silberschatz",
  title =        "Safety of recursive {Horn} clauses with infinite
                 relations",
  crossref =     "ACM:1987:PPS",
  pages =        "328--339",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p328-ramakrishnan/p328-ramakrishnan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p328-ramakrishnan/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p328-ramakrishnan/",
  abstract =     "A database query is said to be {\em safe\/} if its
                  result consists of a finite set of tuples. If a query
                  is expressed using a set of pure Horn Clauses, the
                  problem of determining whether it is safe is in general
                  undecidable. In this paper, we show that the problem is
                 decidable when terms involving function symbols
                 (including arithmetic) are represented as distinct
                 occurrences of uninterpreted infinite predicates over
                 which certain {\em finiteness dependencies\/} hold. We
                 present a sufficient condition for safety when some
                 {\em monotonicity constraints\/} also hold.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Management; Performance; Theory",
  keywords =     "algorithms; management; performance; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf F.4.1} Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Mathematical Logic, Logic and constraint programming.
                 {\bf H.2.0} Information Systems, DATABASE MANAGEMENT,
                 General, Security, integrity, and protection**.",
}

@InProceedings{Jagadish:1987:STC,
  author =       "H. V. Jagadish and Rakesh Agrawal and Linda Ness",
  title =        "A study of transitive closure as a recursion
                 mechanism",
  crossref =     "Dayal:1987:PAC",
  pages =        "331--344",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p331-jagadish/p331-jagadish.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p331-jagadish/",
  abstract =     "We show that every linearly recursive query can be
                 expressed as a transitive closure possibly preceded and
                 followed by operations already available in relational
                 algebra. This reduction is possible even if there are
                 repeated variables in the recursive literals and if
                 some of the arguments in the recursive literals are
                 constants. Such an equivalence has significant
                  theoretical and practical ramifications. On the one
                 hand it influences the design of expressive notations
                 to capture recursion as an augmentation of relational
                 query languages. On the other hand implementation of
                 deductive databases is impacted in that the design does
                 not have to provide the generality that linear
                 recursion would demand. It suffices to study the single
                 problem of transitive closure and to provide an
                 efficient implementation for it.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Theory",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1): {\bf
                 Recursive function theory}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}",
}

@InProceedings{Naughton:1987:OSR,
  author =       "J. F. Naughton",
  title =        "One-sided recursions",
  crossref =     "ACM:1987:PPS",
  pages =        "340--348",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p340-naughton/p340-naughton.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p340-naughton/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p340-naughton/",
  abstract =     "The performance of systems with recursive query
                 languages can be improved by recognizing simple, easily
                 evaluable classes of recursions and using algorithms
                 tailored to these classes whenever possible. In this
                 paper we identify a useful subset of recursive
                 definitions, the {\em one-sided recursions}. We show
                 how to detect one-sided recursions, and give two simple
                 evaluation algorithms that cover one-sided definitions
                 in that for any selection on a one-sided definition, at
                 least one of the two algorithms will apply. These
                 algorithms have simple termination conditions, maintain
                 minimal state and use selections on the recursively
                 defined relation whenever possible. We show that there
                  are no similar algorithms for many-sided recursions. We
                 also prove that it is undecidable whether an arbitrary
                 definition has an equivalent one-sided definition.
                 However, we do present a procedure that converts many
                 potentially one-sided recursions to one-sided form, and
                 prove it complete for a useful class of recursions.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages; Management; Performance;
                 Theory",
  keywords =     "algorithms; languages; management; performance;
                 theory",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Studies of Program Constructs (F.3.3): {\bf Program and
                 recursion schemes}; Theory of Computation ---
                 Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Recursive function
                 theory}",
}

@InProceedings{Zhang:1987:NCD,
  author =       "Weining Zhang and C. T. Yu",
  title =        "A necessary condition for a doubly recursive rule to
                 be equivalent to a linear recursive rule",
  crossref =     "Dayal:1987:PAC",
  pages =        "345--356",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p345-zhang/p345-zhang.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p345-zhang/",
  abstract =     "Nonlinear recursive queries are usually less efficient
                 in processing than linear recursive queries. It is
                 therefore of interest to transform non-linear recursive
                 queries into linear ones. We obtain a necessary and
                 sufficient condition for a doubly recursive rule of a
                 certain type to be logically equivalent to a single
                 linear recursive rule obtained in a specific way.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Theory",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1): {\bf
                 Recursive function theory}",
}

@InProceedings{Sagiv:1987:ODP,
  author =       "Y. Sagiv",
  title =        "Optimizing datalog programs",
  crossref =     "ACM:1987:PPS",
  pages =        "349--362",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/28659/p349-sagiv/p349-sagiv.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/28659/p349-sagiv/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/28659/p349-sagiv/",
  abstract =     "Datalog programs, i.e., Prolog programs without
                  function symbols, are considered. It is assumed that a
                 variable appearing in the head of a rule must also
                 appear in the body of the rule. The input of a program
                 is a set of ground atoms (which are given in addition
                 to the program's rules) and, therefore, can be viewed
                 as an assignment of relations to some of the program's
                 predicates. Two programs are equivalent if they produce
                 the same result for all possible assignments of
                 relations to the extensional predicates (i.e., the
                 predicates that do not appear as heads of rules). Two
                 programs are uniformly equivalent if they produce the
                 same result for all possible assignments of initial
                 relations to all the predicates (i.e., both extensional
                  and intensional). The equivalence problem for Datalog
                 programs is known to be undecidable. It is shown that
                 uniform equivalence is decidable, and an algorithm is
                 given for minimizing a Datalog program under uniform
                 equivalence. A technique for removing parts of a
                 program that are redundant under equivalence (but not
                 under uniform equivalence) is developed. A procedure
                 for testing uniform equivalence is also developed for
                 the case in which the database satisfies some
                 constraints.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages; Management; Theory;
                 Verification",
  keywords =     "algorithms; languages; management; theory;
                 verification",
  subject =      "{\bf I.2.7} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Natural Language Processing, DATALOG.
                 {\bf G.2.m} Mathematics of Computing, DISCRETE
                 MATHEMATICS, Miscellaneous. {\bf F.3.3} Theory of
                 Computation, LOGICS AND MEANINGS OF PROGRAMS, Studies
                 of Program Constructs, Program and recursion schemes.",
}

@InProceedings{Morgenstern:1987:SIM,
  author =       "Matthew Morgenstern",
  title =        "Security and inference in multilevel database and
                 knowledge-base systems",
  crossref =     "Dayal:1987:PAC",
  pages =        "357--373",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p357-morgenstern/p357-morgenstern.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p357-morgenstern/",
  abstract =     "This paper addresses the threat to multilevel security
                 that arises from logical inference and the semantics of
                 the application. Such compromises of security are
                 particularly challenging since they circumvent
                 traditional security mechanisms and rely on a user's
                 knowledge of the application. The problems of inference
                 and security have heretofore been amorphous and
                 difficult to circumscribe. We focus on these problems
                 in the context of a multilevel database system and show
                 their relevance to knowledge-based systems, sometimes
                 referred to as expert systems. Here we establish a
                 framework for studying these inference control
                 problems, describe a representation for relevant
                 semantics of the application, develop criteria for
                 safety and security of a system to prevent these
                 problems, and outline algorithms for enforcing these
                 criteria.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Security",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Deduction and Theorem Proving (I.2.3): {\bf
                 Deduction}; Information Systems --- Database Management
                 --- Systems (H.2.4); Computing Methodologies ---
                 Artificial Intelligence --- Applications and Expert
                 Systems (I.2.1); Information Systems --- Database
                 Management --- General (H.2.0): {\bf Security,
                 integrity, and protection**}",
}

@InProceedings{Stemple:1987:MMF,
  author =       "David Stemple and Subhasish Mazumdar and Tim Sheard",
  title =        "On the modes and meaning of feedback to transaction
                 designers",
  crossref =     "Dayal:1987:PAC",
  pages =        "374--386",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p374-stemple/p374-stemple.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p374-stemple/",
  abstract =     "An analysis of database transactions in the presence
                 of database integrity constraints can lead to several
                 modes of feedback to transaction designers. The
                 different kinds of feedback include tests and updates
                 that could be added to the transaction to make it obey
                 the integrity constraints, as well as predicates
                 representing post-conditions guaranteed by a
                 transaction's execution. We discuss the various modes,
                 meanings, and uses of feedback. We also discuss methods
                 of generating feedback from integrity constraints,
                 transaction details and theorems constituting both
                 generic knowledge of database systems and specific
                 knowledge about a particular database. Our methods are
                 based on a running system that generates tailored
                 theories about database systems from their schemas and
                 uses these theories to prove that transactions obey
                 integrity constraints.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Documentation; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management --- General
                 (H.2.0): {\bf Security, integrity, and protection**};
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Pre- and post-conditions};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Normal forms}",
}

@InProceedings{Rubenstein:1987:BSD,
  author =       "W. B. Rubenstein and M. S. Kubicar and R. G. G.
                 Cattell",
  title =        "Benchmarking simple database operations",
  crossref =     "Dayal:1987:PAC",
  pages =        "387--394",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p387-rubenstein/p387-rubenstein.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p387-rubenstein/",
  abstract =     "There are two widely-known benchmarks for database
                 management systems the TP1 benchmarks (Anon {\em et
                  al\/} [1985]), designed to measure transaction
                  throughput, and the Wisconsin benchmarks (Bitton,
                  DeWitt, Turbyfill [1984]), designed to measure the
                 performance of a relational query processor. In our
                 work with databases on engineering workstations, we
                 found neither of these benchmarks a suitable measure
                 for our applications' needs. Instead, our requirements
                 are for {\em response time\/} for simple queries. We
                 propose benchmark measurements to measure response
                 time, specifically designed for the simple,
                 object-oriented queries that engineering database
                 applications perform. We report results from running
                 this benchmark against some database systems we use
                 ourselves, and provide enough detail for others to
                 reproduce the benchmark measurements on other
                 relational, object-oriented, or specialized database
                 systems. We discuss a number of factors that make an
                  order of magnitude improvement in benchmark
                  performance: caching the entire database in main
                  memory, avoiding
                 caching the entire database in main memory, avoiding
                 query optimization overhead, using physical links for
                 prejoins, and using an alternative to the
                 generally-accepted database ``server'' architecture on
                 distributed networks.",
  acknowledgement = ack-nhfb,
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Computer
                 Systems Organization --- Performance of Systems (C.4);
                 Computing Milieux --- Management of Computing and
                 Information Systems --- Installation Management
                 (K.6.2): {\bf Benchmarks}",
}

@InProceedings{Gray:1987:MRT,
  author =       "Jim Gray and Franco Putzolu",
  title =        "The $5$ minute rule for trading memory for disc
                 accesses and the $10$ byte rule for trading memory for
                 {CPU} time",
  crossref =     "Dayal:1987:PAC",
  pages =        "395--398",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p395-gray/p395-gray.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p395-gray/",
  abstract =     "If an item is accessed frequently enough, it should be
                 main memory resident. For current technology,
                 ``frequently enough'' means about every five minutes.
                 \par

                 Along a similar vein, one can frequently trade memory
                 space for CPU time. For example, bits can be packed in
                 a byte at the expense of extra instructions to extract
                 the bits. It makes economic sense to spend ten bytes of
                 main memory to save one instruction per second.
                 \par

                 These results depend on current price ratios of
                 processors, memory and disc accesses. These ratios are
                 changing and hence the constants in the rules are
                 changing.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance; Reliability; Theory",
  subject =      "Computer Systems Organization --- Performance of
                 Systems (C.4); Information Systems --- Database
                 Management --- Physical Design (H.2.2); Information
                 Systems --- Information Storage and Retrieval ---
                 Information Storage (H.3.2)",
}

@InProceedings{Richardson:1987:DEP,
  author =       "James P. Richardson and Hongjun Lu and Krishna
                 Mikkilineni",
  title =        "Design and evaluation of parallel pipelined join
                 algorithms",
  crossref =     "Dayal:1987:PAC",
  pages =        "399--409",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p399-richardson/p399-richardson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p399-richardson/",
  abstract =     "The join operation is the most costly operation in
                 relational database management systems. Distributed and
                 parallel processing can effectively speed up the join
                 operation. In this paper, we describe a number of
                 highly parallel and pipelined multiprocessor join
                 algorithms using sort-merge and hashing techniques.
                 Among them, two algorithms are parallel and pipelined
                 versions of traditional sort-merge join methods, two
                 algorithms use both hashing and sort-merge techniques,
                 and another two are variations of the hybrid hash join
                 algorithms. The performance of those algorithms is
                 evaluated analytically against a generic database
                 machine architecture. The methodology used in the
                 design and evaluation of these algorithms is also
                 discussed. \par

                 The results of the analysis indicate that using a
                 hashing technique to partition the source relations can
                  dramatically reduce the elapsed time; hash-based
                 algorithms outperform sort-merge algorithms in almost
                 all cases because of their high parallelism. Hash-based
                 sort-merge and hybrid hash methods provide similar
                 performance in most cases. With large source relations,
                 the algorithms which replicate the smaller relation
                 usually give better elapsed time. Sharing memory among
                 processors also improves performance somewhat.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance; Theory",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Mathematics
                 of Computing --- Numerical Analysis --- General
                 (G.1.0): {\bf Parallel algorithms}",
}

@InProceedings{Butler:1987:SRO,
  author =       "Margaret H. Butler",
  title =        "Storage reclamation in object oriented database
                 systems",
  crossref =     "Dayal:1987:PAC",
  pages =        "410--425",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p410-butler/p410-butler.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p410-butler/",
  abstract =     "When providing data management for nontraditional
                 data, database systems encounter storage reclamation
                 problems similar to those encountered by virtual memory
                 managers. The paging behavior of existing automatic
                 storage reclamation schemes as applied to objects
                 stored in a database management system is one indicator
                 of the performance cost of various features of storage
                 reclamation algorithms. The results of modeling the
                 paging behavior suggest that Mark and Sweep causes many
                 more input/output operations than Copy-Compact. A
                 contributing factor to the expense of Mark and Sweep is
                 that it does not recluster memory as does Copy-Compact.
                 If memory is not reclustered, the average cost of
                 accessing data can go up tremendously. Other algorithms
                 that do not recluster memory also suffer performance
                 problems, namely all reference counting schemes. The
                 main advantage of a reference count scheme is that it
                 does not force a running program to pause for a long
                 period of time while reclamation takes place, it
                 amortizes the cost of reclamation across all accesses.
                 The reclustering of Copy-Compact and the cost
                 amortization of Reference Count are combined to great
                 advantage in Baker's algorithm. This algorithm proves
                 to be the least prohibitive for operating on disk-based
                 data.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2)",
}

@InProceedings{Faloutsos:1987:AOO,
  author =       "Christos Faloutsos and Timos Sellis and Nick
                 Roussopoulos",
  title =        "Analysis of object oriented spatial access methods",
  crossref =     "Dayal:1987:PAC",
  pages =        "426--439",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p426-faloutsos/p426-faloutsos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p426-faloutsos/",
  abstract =     "This paper provides an analysis of R-trees and a
                  variation ({$R^+$}-trees) that avoids overlapping
                  rectangles in intermediate nodes of the tree. The main
                 contributions of the paper are the following. We
                 provide the first known analysis of R-trees. Although
                 formulas are given for objects in one dimension (line
                 segments), they can be generalized for objects in
                 higher dimensions as well. We show how the
                 transformation of objects to higher dimensions [HINR83]
                 can be effectively used as a tool for the analysis of
                  R- and {$R^+$}-trees. Finally, we derive formulas for
                  {$R^+$}-trees and compare the two methods analytically.
                  The results we obtained show that {$R^+$}-trees require
                  less than half the disk accesses required by a
                  corresponding R-tree when searching files of real life
                  sizes. {$R^+$}-trees are clearly superior in cases
                  where there are few long segments and a lot of small
                  ones.",
  acknowledgement = ack-nhfb,
  generalterms = "Performance",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods};
                 Information Systems --- Database Management --- Systems
                 (H.2.4)",
}

@InProceedings{Hanson:1987:PAV,
  author =       "Eric N. Hanson",
  title =        "A performance analysis of view materialization
                 strategies",
  crossref =     "Dayal:1987:PAC",
  pages =        "440--453",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p440-hanson/p440-hanson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p440-hanson/",
  abstract =     "The conventional way to process commands for
                 relational views is to use query modification to
                 translate the commands into ones on the base relations.
                 An alternative approach has been proposed recently,
                 whereby materialized copies of views are kept, and
                 incrementally updated immediately after each
                 modification of the database. A related scheme exists,
                 in which update of materialized views is deferred until
                 just before data is retrieved from the view. A
                 performance analysis is presented comparing the cost of
                 query modification, immediate view maintenance, and
                 deferred view maintenance. Three different models of
                 the structure of views are given a simple selection and
                 projection of one relation, the natural join of two
                 relations, and an aggregate (e.g., the sum of values in
                 a column) over a selection-projection view. The results
                 show that the choice of the most efficient view
                 maintenance method depends heavily on the structure of
                 the database, the view definition, and the type of
                 query and update activity present.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Performance",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1); Information Systems ---
                 Database Management --- Physical Design (H.2.2)",
}

@InProceedings{Segev:1987:LMT,
  author =       "Arie Segev and Arie Shoshani",
  title =        "Logical modeling of temporal data",
  crossref =     "Dayal:1987:PAC",
  pages =        "454--466",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p454-segev/p454-segev.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p454-segev/",
  abstract =     "In this paper we examine the semantics and develop
                 constructs for temporal data independent of any
                 traditional data model, such as the relational or
                 network data models. Unlike many other works which
                 extend existing models to support temporal data, our
                 purpose is to characterize the properties of temporal
                 data and operators over them without being influenced
                 by traditional models which were not specifically
                 designed to model temporal data. We develop data
                 constructs that represent sequences of temporal values,
                 identify their semantic properties, and define
                 operations over these structures.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Theory",
  subject =      "Theory of Computation --- Logics and Meanings of
                 Programs --- Semantics of Programming Languages
                 (F.3.2): {\bf Algebraic approaches to semantics};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Abstract data types}",
}

@InProceedings{McKenzie:1987:ERA,
  author =       "Edwin McKenzie and Richard Snodgrass",
  title =        "Extending the relational algebra to support
                 transaction time",
  crossref =     "Dayal:1987:PAC",
  pages =        "467--478",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p467-mckenzie/p467-mckenzie.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p467-mckenzie/",
  abstract =     "In this paper we discuss extensions to the
                 conventional relational algebra to support transaction
                 time. We show that these extensions are applicable to
                 historical algebras that support valid time, yielding a
                 temporal algebraic language. Since transaction time
                 concerns the storage of information in the database,
                 the notion of state is central. The extensions are
                 formalized using denotational semantics. The additions
                 preserve the useful properties of the conventional
                 relational algebra.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Theory",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Transaction processing}; Theory of Computation ---
                 Logics and Meanings of Programs --- Semantics of
                 Programming Languages (F.3.2): {\bf Algebraic
                 approaches to semantics}",
}

@InProceedings{Rubenstein:1987:DDM,
  author =       "W. Bradley Rubenstein",
  title =        "A database design for musical information",
  crossref =     "Dayal:1987:PAC",
  pages =        "479--490",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p479-rubenstein/p479-rubenstein.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p479-rubenstein/",
  abstract =     "As part of our research into a general purpose data
                 management system for musical information, a major
                 focus has been the development of tools to support a
                 data model for music. This paper first outlines the
                 various types of information that fall under the
                 purview of our proposed data manager. We consider
                 extensions to the entity-relationship data model to
                 implement the notion of {\em hierarchical ordering},
                 commonly found in musical data. We then present
                 examples from our schema for representing musical
                 notation in a database, taking advantage of these
                 extensions.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Computer Applications --- Arts and Humanities (J.5):
                 {\bf Music**}; Information Systems --- Database
                 Management --- General (H.2.0); Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}",
}

@InProceedings{Hudson:1987:OOD,
  author =       "Scott E. Hudson and Roger King",
  title =        "Object-oriented database support for software
                 environments",
  crossref =     "Dayal:1987:PAC",
  pages =        "491--503",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p491-hudson/p491-hudson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p491-hudson/",
  abstract =     "Cactis is an object-oriented, multi-user DBMS
                 developed at the University of Colorado. The
                 implementation is self-adaptive and concurrent, and
                 runs in the Unix/C Sun workstation environment. A
                 central, unique focus of Cactis is the support of
                 functionally-defined data in a manner which provides
                 good performance. Cactis is intended for use in
                 applications which are conducive to an object-oriented
                 approach and involve derived data. Such applications
                 include software environments. \par

                 Cactis supports the construction of objects and
                 type/subtype hierarchies, which are useful for managing
                 the complex and highly-interrelated data found in
                 software environments. Such data types include
                 programs, requirement specifications, milestone
                 reports, configurations, documentation, and many
                 others. Cactis uses techniques based on attributed
                 graphs to ensure that functionally-defined attributes
                 of objects, such as compilation dependencies, cost
                 calculations, and milestone dependencies can be
                 maintained efficiently. Since it is necessary to
                 dynamically add new tools (such as debuggers and
                 compilers) to a software environment, the DBMS allows
                 the user to extend the type structure. The system also
                 supports an efficient rollback and recovery mechanism,
                 which provides the framework for a software version
                 facility.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Software --- Software Engineering --- Programming
                 Environments (D.2.6); Software --- Programming
                 Languages --- Language Constructs and Features (D.3.3);
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}",
}

@InProceedings{Croft:1987:SOD,
  author =       "W. B. Croft and D. W. Stemple",
  title =        "Supporting office document architectures with
                 constrained types",
  crossref =     "Dayal:1987:PAC",
  pages =        "504--509",
  year =         "1987",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/38713/p504-croft/p504-croft.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/38713/p504-croft/",
  abstract =     "Data models have been proposed as a means of defining
                 the objects and operations in an office information
                 system. Office documents, because of their highly
                 variable structure and multimedia content, are a
                 difficult class of objects to model. The modeling task
                 is further complicated by document architecture
                 standards used for interchange between systems. We
                 present an approach to data modeling based on
                 constrained type definitions that allows architecture
                 standards to be defined and ensures that individual
                 document types conform to those standards. The ADABTPL
                 model, which is used to define the schema of document
                 types and standards, is described.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Information Systems --- Information Systems
                 Applications --- Office Automation (H.4.1); Software
                 --- Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Data types and structures};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Software ---
                 Programming Languages --- Language Constructs and
                 Features (D.3.3): {\bf Abstract data types}",
}

@InProceedings{Delgrande:1987:FBA,
  author =       "J. P. Delgrande",
  title =        "Formal Bounds on Automatic Generation and Maintenance
                 of Integrity Constraints",
  crossref =     "ACM:1987:PPS",
  pages =        "??--??",
  month =        mar,
  year =         "1987",
  bibsource =    "Database/Wiederhold.bib;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
}

@InProceedings{Chandra:1988:TDQ,
  author =       "Ashok K. Chandra",
  title =        "Theory of database queries",
  crossref =     "ACM:1988:PPS",
  pages =        "1--9",
  year =         "1988",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p1-chandra/p1-chandra.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p1-chandra/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Swami:1988:OLJ,
  author =       "Arun Swami and Anoop Gupta",
  title =        "Optimization of large join queries",
  crossref =     "ACM:1988:PAC",
  pages =        "8--17",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p8-swami/p8-swami.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p8-swami/",
  abstract =     "We investigate the problem of optimizing
                 Select--Project--Join queries with large numbers of
                 joins. Taking advantage of commonly used heuristics,
                 the problem is reduced to that of determining the
                 optimal join order. This is a hard combinatorial
                 optimization problem. Some general techniques, such as
                 iterative improvement and simulated annealing, have
                 often proved effective in attacking a wide variety of
                 combinatorial optimization problems. In this paper, we
                 apply these general algorithms to the large join query
                 optimization problem. We use the statistical techniques
                 of factorial experiments and analysis of variance
                 (ANOVA) to obtain reliable values for the parameters of
                 these algorithms and to compare these algorithms. One
                 interesting result of our experiments is that the
                 relatively simple iterative improvement proves to be
                  better than all the other algorithms (including the more
                 complex simulated annealing). We also find that the
                 general algorithms do quite well at the maximum time
                 limit.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Computing Methodologies --- Simulation and Modeling
                 --- Applications (I.6.3); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}; Mathematics of Computing --- Numerical
                 Analysis --- Optimization (G.1.6); Mathematics of
                 Computing --- Numerical Analysis --- Numerical Linear
                 Algebra (G.1.3): {\bf Linear systems (direct and
                 iterative methods)}; Mathematics of Computing ---
                 Discrete Mathematics --- Combinatorics (G.2.1): {\bf
                 Combinatorial algorithms}",
}

@InProceedings{Kuper:1988:EPL,
  author =       "Gabriel M. Kuper",
  title =        "On the expressive power of logic programming languages
                 with sets",
  crossref =     "ACM:1988:PPS",
  pages =        "10--14",
  year =         "1988",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p10-kuper/p10-kuper.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p10-kuper/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Shmueli:1988:RRC,
  author =       "Oded Shmueli and Shalom Tsur and Carlo Zaniolo",
  title =        "Rewriting of rules containing set terms in a logic
                 data language {LDL}",
  crossref =     "ACM:1988:PPS",
  pages =        "15--28",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p15-shmueli/p15-shmueli.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p15-shmueli/",
  abstract =     "We propose compilation methods for supporting set
                 terms in Horn clause programs, without using
                 general-purpose set matching algorithms, which tend to
                 run in times exponential in the size of the
                  participating sets. Instead, we take the approach of
                 formulating specialized computation plans that, by
                 taking advantage of information available in the given
                 rules, limit the number of alternatives explored. Our
                 strategy is to employ {\em compile time\/} rewriting
                 techniques and to transform the problem into an
                 ``ordinary'' Horn clause compilation problem, with
                 minimal additional overhead. The execution cost of the
                 rewritten rules is substantially lower than that of the
                 original rules and the additional cost of compilation
                  can thus be amortized over many executions.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Lohman:1988:GLF,
  author =       "Guy M. Lohman",
  title =        "Grammar-like functional rules for representing query
                 optimization alternatives",
  crossref =     "ACM:1988:PAC",
  pages =        "18--27",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p18-lohman/p18-lohman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p18-lohman/",
  abstract =     "Extensible query optimization requires that the
                 ``repertoire'' of alternative strategies for executing
                 queries be represented as data, not embedded in the
                 optimizer code. Recognizing that query optimizers are
                 essentially expert systems, several researchers have
                 suggested using strategy rules to transform query
                 execution plans into alternative or better plans.
                 Though extremely flexible, these systems can be very
                  inefficient: at any step in the processing, many rules
                 may be eligible for application and complicated
                 conditions must be tested to determine that eligibility
                 during unification. We present a constructive,
                 ``building blocks'' approach to defining alternative
                 plans, in which the rules defining alternatives are an
                 extension of the productions of a grammar to resemble
                 the definition of a function in mathematics. The
                 extensions permit each token of the grammar to be
                 parametrized and each of its alternative definitions to
                 have a complex condition. The terminals of the grammar
                 are base-level database operations on tables that are
                 interpreted at run-time. The non-terminals are defined
                 declaratively by production rules that combine those
                 operations into meaningful plans for execution. Each
                 production produces a set of alternative plans, each
                 having a vector of properties, including the estimated
                 cost of producing that plan. Productions can require
                 certain properties of their inputs, such as tuple order
                 and location, and we describe a ``glue'' mechanism for
                 augmenting plans to achieve the required properties. We
                 give detailed examples to illustrate the power and
                 robustness of our rules and to contrast them with
                 related ideas.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Theory of
                 Computation --- Mathematical Logic and Formal Languages
                 --- Grammars and Other Rewriting Systems (F.4.2): {\bf
                 Grammar types}; Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Clustering}",
}

@InProceedings{Muralikrishna:1988:EDM,
  author =       "M. Muralikrishna and David J. DeWitt",
  title =        "Equi-depth multidimensional histograms",
  crossref =     "ACM:1988:PAC",
  pages =        "28--36",
  year =         "1988",
  bibdate =      "Wed Oct 25 08:47:40 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p28-muralikrishna/p28-muralikrishna.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p28-muralikrishna/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Paredaens:1988:PLU,
  author =       "Jan Paredaens and Dirk {Van Gucht}",
  title =        "Possibilities and limitations of using flat operators
                 in nested algebra expressions",
  crossref =     "ACM:1988:PPS",
  pages =        "29--38",
  year =         "1988",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p29-paredaens/p29-paredaens.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p29-paredaens/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Garza:1988:TMO,
  author =       "Jorge F. Garza and Won Kim",
  title =        "Transaction management in an object-oriented database
                 system",
  crossref =     "ACM:1988:PAC",
  pages =        "37--45",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p37-garza/p37-garza.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p37-garza/",
  abstract =     "In this paper, we describe transaction management in
                 ORION, an object-oriented database system. The
                 application environments for which ORION is intended
                 led us to implement the notions of sessions of
                 transactions, and hypothetical transactions
                 (transactions which always abort). The object-oriented
                 data model which ORION implements complicates locking
                 requirements. ORION supports a concurrency control
                 mechanism based on extensions to the current theory of
                 locking, and a transaction recovery mechanism based on
                 conventional logging.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Reliability; Security",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Physical Design (H.2.2):
                 {\bf Recovery and restart}; Information Systems ---
                 Information Storage and Retrieval --- Systems and
                 Software (H.3.4): {\bf ORION}; Information Systems ---
                 Database Management --- Database Administration
                 (H.2.7): {\bf Logging and recovery}; Data --- Files
                 (E.5): {\bf Backup/recovery}",
}

@InProceedings{Hull:1988:EPD,
  author =       "Richard Hull and Jianwen Su",
  title =        "On the expressive power of database queries with
                 intermediate types",
  crossref =     "ACM:1988:PPS",
  pages =        "39--51",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p39-hull/p39-hull.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p39-hull/",
  abstract =     "The {\em set-height\/} of a complex object type is
                 defined to be its level of nesting of the set
                 construct. In a query of the complex object calculus
                 which maps a database {$D$} to an output type {\em T},
                 an {\em intermediate type\/} is a type which is used by
                 some variable of the query, but which is not present in
                  {$D$} or {\em T}. For each $k$, $i \geq 0$ we define
                  CALC {\em k,i\/} to be the family of calculus queries
                  mapping from and to types with set-height $k$ and using
                  intermediate types with set-height $i$. In particular,
                 CALC 0,0 is the relational calculus, and CALC 0,1 is
                 equivalent to the family of second-order (relational)
                 queries \par

                 Several results concerning these families of languages
                 are obtained. A primary focus is on the families CALC
                  0,i, which map relations to relations. Upper bounds on
                 the complexity of these families are provided, and it
                 is shown that CALC 0,3 has at least the complexity of
                 exponential space. The CALC 0,i hierarchy does not
                 collapse, because for each {\em i}, CALC 0,i is
                  strictly less expressive than CALC 0,i+2. The union
                  $\bigcup_i$ CALC 0,i is strictly less expressive than the family of
                 `computable' database queries. \par

                 The expressive power of queries from the complex object
                 calculus interpreted using a semantics based on the use
                 of arbitrarily large finite numbers of {\em invented
                 values\/} is studied. Under this semantics, the
                 expressive power of the relational calculus is not
                 increased, and the CALC 0,i hierarchy collapses at CALC
                 0,1. We also consider queries which use a bounded
                 number of invented values.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Jagannathan:1988:SDS,
  author =       "D. Jagannathan and B. L. Fritchman and R. L. Guck and
                 J. P. Thompson and D. M. Tolbert",
  title =        "{SIM}: a database system based on the semantic data
                 model",
  crossref =     "ACM:1988:PAC",
  pages =        "46--55",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p46-jagannathan/p46-jagannathan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p46-jagannathan/",
  abstract =     "SIM is a fully featured, commercially available
                 database management system based on a semantic data
                 model similar to Hammer and McLeod's SDM SIM has two
                 primary modeling goals. The first is to narrow the gap
                 between a user's real-world perception of data and the
                 conceptual view imposed by the database system because
                 of modeling presuppositions or limitations. The second
                 goal is to allow, as much as possible, the semantics of
                 data to be defined in the schema and make the database
                  system responsible for enforcing its integrity. SIM
                 provides a rich set of constructs for schema
                 definition, including those for specifying
                 generalization hierarchies modeled by directed acyclic
                 graphs, interobject relationships and integrity
                 constraints. It also features a novel, easy-to-use,
                 English-like DML. This paper describes the key modeling
                 features of SIM, the architecture of the system and its
                 implementation considerations.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Software ---
                 Programming Languages --- Language Classifications
                 (D.3.2): {\bf Nonprocedural languages**}; Computing
                 Methodologies --- Symbolic and Algebraic Manipulation
                 --- Languages and Systems (I.1.3): {\bf Nonprocedural
                 languages**}",
}

@InProceedings{Kifer:1988:AAD,
  author =       "Michael Kifer and Raghu Ramakrishnan and Avi
                 Silberschatz",
  title =        "An axiomatic approach to deciding query safety in
                 deductive databases",
  crossref =     "ACM:1988:PPS",
  pages =        "52--60",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p52-kifer/p52-kifer.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p52-kifer/",
  abstract =     "A database query is {\em safe\/} if its result
                 consists of a finite set of tuples. If a query is
                 expressed using a set of pure Horn Clauses, the problem
                 of determining query safety is, in general,
                 undecidable. In this paper we consider a slightly
                 stronger notion of safety, called {\em supersafety},
                 for Horn databases in which function symbols are
                 replaced by the abstraction of infinite relations with
                 {\em finiteness constraints\/} [Ramakrishnan et al.
                 87]. We show that the supersafety problem is not only
                 decidable, but also {\em axiomatizable}, and the
                 axiomatization yields an effective decision procedure.
                 Although there are safe queries which are not
                 supersafe, we demonstrate that the latter represent
                 quite a large and nontrivial portion of the set of all
                 safe queries.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Caruso:1988:CMO,
  author =       "Michael Caruso and Edward Sciore",
  title =        "Contexts and metamessages in object-oriented database
                 programming language design",
  crossref =     "ACM:1988:PAC",
  pages =        "56--65",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p56-caruso/p56-caruso.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p56-caruso/",
  abstract =     "VISION is an object-oriented database system currently
                 used commercially to develop investment analysis and
                 other large statistical applications. Characteristic of
                 these applications, beside the standard issues of
                 structural and computational richness, is the need to
                 handle time, versions, and concurrency control in a
                 manner that does not produce combinatoric complexity in
                 object protocol. This paper describes the approach
                 taken by VISION in addressing these issues.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Concurrency}; Computer Applications ---
                 Administrative Data Processing (J.1): {\bf Financial}",
}

@InProceedings{Chomicki:1988:TDD,
  author =       "Jan Chomicki and Tomasz Imieli{\'n}ski",
  title =        "Temporal deductive databases and infinite objects",
  crossref =     "ACM:1988:PPS",
  pages =        "61--73",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p61-chomicki/p61-chomicki.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p61-chomicki/",
  abstract =     "We discuss deductive databases with one fixed
                 occurrence of a monadic function symbol ({\em
                 successor\/}) per predicate. Databases of this kind can
                 be used in a natural way to model simple patterns of
                 events repeated in time, and this is why we term them
                 {\em temporal}. Temporal deductive databases are also
                 interesting from a theoretical point of view, because
                 they give rise to {\em infinite\/} least fix-points and
                 {\em infinite\/} query answers. We study complexity
                 properties of finite query answers and define the
                 notion of {\em infinite objects\/} which makes some
                 infinite least fixpoints computable in finite time.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Laurent:1988:PSI,
  author =       "D. Laurent and N. Spyratos",
  title =        "Partition semantics for incomplete information in
                 relational databases",
  crossref =     "ACM:1988:PAC",
  pages =        "66--73",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p66-laurent/p66-laurent.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p66-laurent/",
  abstract =     "We define partition semantics for databases with
                 incomplete information and we present an algorithm for
                 query processing in the presence of incomplete
                 information and functional dependencies. We show that
                 Lipski's model for databases with incomplete
                 information can be seen as a special case of our
                 model.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}; Software --- Programming
                 Languages --- Formal Definitions and Theory (D.3.1):
                 {\bf Syntax}; Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Formal Languages
                 (F.4.3): {\bf Classes defined by grammars or
                 automata}",
}

@InProceedings{Ullman:1988:COS,
  author =       "Jeffrey D. Ullman and Moshe Y. Vardi",
  title =        "The complexity of ordering subgoals",
  crossref =     "ACM:1988:PPS",
  pages =        "74--81",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p74-ullman/p74-ullman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p74-ullman/",
  abstract =     "Selection of an appropriate order for the evaluation
                 of subgoals in a logical rule frequently is essential
                 for efficiency. We formulate the problem as one of
                 feasible subgoal orders and show that the question is
                 inherently exponential in time. The proof is by
                 reduction from linear-space alternating Turing machine
                 recognition, which appears to be far easier, in this
                 case, than the more obvious reduction from
                 exponential-time (ordinary) Turing machines.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Yuan:1988:SCQ,
  author =       "Li Yan Yuan and Ding-An Chiang",
  title =        "A sound and complete query evaluation algorithm for
                 relational databases with null values",
  crossref =     "ACM:1988:PAC",
  pages =        "74--81",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p74-yuan/p74-yuan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p74-yuan/",
  abstract =     "Reiter has proposed extended relational theory to
                 formulate relational databases with null values and
                 presented a query evaluation algorithm for such
                 databases. However, due to indefinite information
                 brought in by null values, Reiter's algorithm is sound
                 but not complete. In this paper, we first propose an
                 extended relation to represent indefinite information
                 in relational databases. Then, we define an extended
                 relational algebra for extended relations. Based on
                 Reiter's extended relational theory, and our extended
                 relations and the extended relational algebra, we
                 present a sound and complete query evaluation algorithm
                 for relational databases with null values.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Theory",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf Query
                 languages}",
}

@InProceedings{Morris:1988:AOS,
  author =       "Katherine A. Morris",
  title =        "An algorithm for ordering subgoals in {NAIL!}",
  crossref =     "ACM:1988:PPS",
  pages =        "82--88",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p82-morris/p82-morris.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p82-morris/",
  abstract =     "Rule-goal graphs are the central data structures used
                 in the NAIL system, a knowledge-base system being
                 developed at Stanford University. They are constructed
                 while testing the applicability of {\em capture rules},
                 and traversed while generating ICODE to evaluate
                 queries. Generating rule-goal graphs may be reduced to
                 the problem of ordering subgoals. This paper gives an
                 algorithm for generating rule-goal graphs efficiently,
                 in time polynomial in the size of the rules if the
                 arity of recursive predicates is bounded. The graphs
                 generated may be suboptimal for some purposes, but the
                 algorithm will always find a rule-goal graph if one
                 exists. The algorithm has been implemented in Cprolog,
                 and is currently being used to generate rule-goal
                 graphs for the NAIL system.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Malvestuto:1988:DPS,
  author =       "F. M. Malvestuto",
  title =        "The derivation problem of summary data",
  crossref =     "ACM:1988:PAC",
  pages =        "82--89",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p82-malvestuto/p82-malvestuto.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p82-malvestuto/",
  abstract =     "Given a statistical database consisting of two summary
                 tables based on a common but not identical
                 classification criterion (e.g., two geographical
                 partitionings of a country) there are additional
                 summary tables that are {\em derivable\/} in the sense
                 that they are uniquely (i.e., with no uncertainty)
                 determined by the tables given. Derivable tables
                 encompass not only, of course, ``less detailed'' tables
                 (that is, aggregated data) but also ``more detailed''
                 tables (that is, disaggregated data). Tables of the
                 second type can be explicitly constructed by using a
                 ``procedure of data refinement'' based on the graph
                 representation of the correspondences between the
                 categories of the two classification systems given. In
                 some cases, that is, when such a graph representation
                 meets the {\em acyclicity\/} condition, the underlying
                 database is ``equivalent'' to a single table (called
                 {\em representative table\/}) and then a necessary and
                 sufficient condition for a table to be derivable can be
                 stated.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Graph algorithms}; Computing
                 Methodologies --- Image Processing And Computer Vision
                 --- Segmentation (I.4.6): {\bf Region growing,
                 partitioning}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query processing};
                 Data --- Data Structures (E.1): {\bf Tables**};
                 Mathematics of Computing --- Probability and Statistics
                 (G.3): {\bf Statistical computing}; Computing
                 Methodologies --- Artificial Intelligence --- Vision
                 and Scene Understanding (I.2.10): {\bf Modeling and
                 recovery of physical attributes}",
}

@InProceedings{Ramakrishnan:1988:OED,
  author =       "Raghu Ramakrishnan and Catriel Beeri and Ravi
                 Krishnamurthy",
  title =        "Optimizing existential datalog queries",
  crossref =     "ACM:1988:PPS",
  pages =        "89--102",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p89-ramakrishnan/p89-ramakrishnan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p89-ramakrishnan/",
  abstract =     "The problem of pushing projections in recursive rules
                 has received little attention. The objective of this
                 paper is to motivate this problem and present some
                 (partial) solutions. We consider programs with
                 function-free rules, also known as {\em Datalog\/}
                 programs. After formally defining existential
                 subqueries, we present a syntactic criterion for
                 detecting them and then consider optimization in three
                 areas. (1) We identify the existential subqueries and
                 make them explicit by rewriting the rules. This, in
                 effect, automatically captures some aspects of Prolog's
                 {\em cut\/} operator that are appropriate to the
                 bottom-up model of computation. (2) We eliminate
                 argument positions in recursive rules by ``pushing
                 projections''. (3) We observe that ``pushing
                 projections'' in rules also has the effect of making
                 some rules (even recursive rules) redundant and try to
                 (identify and) discard them.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Alexander:1988:PDC,
  author =       "W. Alexander and G. Copeland",
  title =        "Process and dataflow control in distributed
                 data-intensive systems",
  crossref =     "ACM:1988:PAC",
  pages =        "90--98",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p90-alexander/p90-alexander.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p90-alexander/",
  abstract =     "{\em In dataflow architectures, each dataflow
                 operation is typically executed on a single physical
                 node. We are concerned with distributed data-intensive
                 systems, in which each base (i.e., persistent) set of
                 data has been declustered over many physical nodes to
                 achieve load balancing. Because of large base set size,
                 each operation is executed where the base set resides,
                 and intermediate results are transferred between
                 physical nodes. In such systems, each dataflow
                 operation is typically executed on many physical nodes.
                 Furthermore, because computations are data-dependent,
                 we cannot know until run time which subset of the
                 physical nodes containing a particular base set will be
                 involved in a given dataflow operation. This
                 uncertainty creates several problems}. \par

                 {\em We examine the problems of efficient program
                 loading, dataflow--operation activation and
                 termination, control of data transfer among dataflow
                 operations, and transaction commit and abort in a
                 distributed data-intensive system. We show how these
                 problems are interrelated, and we present a unified set
                 of mechanisms for efficiently solving them. For some of
                 the problems, we present several solutions and compare
                 them quantitatively}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Computer Systems Organization --- Processor
                 Architectures --- Other Architecture Styles (C.1.3):
                 {\bf Data-flow architectures}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Distributed databases}; Computer Systems Organization
                 --- Processor Architectures --- Multiple Data Stream
                 Architectures (Multiprocessors) (C.1.2): {\bf Parallel
                 processors**}; Computer Systems Organization ---
                 Computer-Communication Networks --- Network
                 Architecture and Design (C.2.1): {\bf Packet-switching
                 networks}",
}

@InProceedings{Copeland:1988:DPB,
  author =       "George Copeland and William Alexander and Ellen
                 Boughter and Tom Keller",
  title =        "Data placement in {Bubba}",
  crossref =     "ACM:1988:PAC",
  pages =        "99--108",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p99-copeland/p99-copeland.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p99-copeland/",
  abstract =     "{\em This paper examines the problem of data placement
                 in Bubba, a highly-parallel system for data-intensive
                 applications being developed at MCC.
                 ``Highly-parallel'' implies that load balancing is a
                 critical performance issue. ``Data-intensive'' means
                 data is so large that operations should be executed
                 where the data resides. As a result, data placement
                 becomes a critical performance issue}. \par

                 {\em In general, determining the optimal placement of
                 data across processing nodes for performance is a
                 difficult problem. We describe our heuristic approach
                 to solving the data placement problem in Bubba. We then
                 present experimental results using a specific workload
                 to provide insight into the problem. Several
                 researchers have argued the benefits of declustering
                 (i.e., spreading each base relation over many nodes). We
                 show that as declustering is increased, load balancing
                 continues to improve. However, for transactions
                 involving complex joins, further declustering reduces
                 throughput because of communications, startup and
                 termination overhead}. \par

                 {\em We argue that data placement, especially
                 declustering, in a highly-parallel system must be
                 considered early in the design, so that mechanisms can
                 be included for supporting variable declustering, for
                 minimizing the most significant overheads associated
                 with large-scale declustering, and for gathering the
                 required statistics}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance; Security",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Computer Systems
                 Organization --- Processor Architectures --- Multiple
                 Data Stream Architectures (Multiprocessors) (C.1.2):
                 {\bf Parallel processors**}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Clustering};
                 Information Systems --- Database Management ---
                 Database Administration (H.2.7): {\bf Logging and
                 recovery}; Computer Systems Organization ---
                 Performance of Systems (C.4): {\bf Reliability,
                 availability, and serviceability}",
}

@InProceedings{Imielinski:1988:ECL,
  author =       "Tomasz Imielinski and Shamim Naqvi",
  title =        "Explicit control of logic programs through rule
                 algebra",
  crossref =     "ACM:1988:PPS",
  pages =        "103--116",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p103-imielinski/p103-imielinski.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p103-imielinski/",
  abstract =     "{\em In this paper we argue with a basic premise in
                 logic programming research that the meaning of a
                 program can be inferred from its syntax alone. We show
                 that users may have a variety of intended models for
                 programs and that a single program may give different
                 intended models under different assumptions of
                 semantics. Our conclusion is that it is impossible to
                 infer the intended model from the syntax of the program
                 and no single semantics will capture all the intended
                 models. We propose as a solution an explicit
                 specification of control. Towards this purpose we
                 define a rule algebra. The user formulates a program as
                 an algebraic specification that directs the execution
                 towards the intended model. The interesting question at
                 that point is how to efficiently implement such
                 programs. We show a natural and easy transformation
                 such that it takes as input an algebraic specification
                 and produces as output a program belonging to a
                 subclass of locally stratified programs. Moreover,
                 there is a homomorphic correspondence between the
                 algebraic expressions and their translations}.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Patterson:1988:CRA,
  author =       "David A. Patterson and Garth Gibson and Randy H.
                 Katz",
  title =        "A case for redundant arrays of inexpensive disks
                 {(RAID)}",
  crossref =     "ACM:1988:PAC",
  pages =        "109--116",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p109-patterson/p109-patterson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p109-patterson/",
  abstract =     "{\em Increasing performance of CPUs and memories will
                 be squandered if not matched by a similar performance
                 increase in I/O. While the capacity of Single Large
                 Expensive Disks (SLED) has grown rapidly, the
                 performance improvement of SLED has been modest.
                 Redundant Arrays of Inexpensive Disks (RAID), based on
                 the magnetic disk technology developed for personal
                 computers, offers an attractive alternative to SLED,
                 promising improvements of an order of magnitude in
                 performance, reliability, power consumption, and
                 scalability. This paper introduces five levels of
                 RAIDs, giving their relative cost/performance, and
                 compares RAID to an IBM 3380 and a Fujitsu Super
                 Eagle}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  subject =      "Hardware --- Input/Output and Data Communications ---
                 Performance Analysis and Design Aids** (B.4.4);
                 Hardware --- Memory Structures --- Performance Analysis
                 and Design Aids** (B.3.3); Hardware --- Memory
                 Structures --- Design Styles (B.3.2): {\bf Mass
                 storage}",
}

@InProceedings{Kumar:1988:SBT,
  author =       "Akhil Kumar and Michael Stonebraker",
  title =        "Semantics based transaction management techniques for
                 replicated data",
  crossref =     "ACM:1988:PAC",
  pages =        "117--125",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p117-kumar/p117-kumar.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p117-kumar/",
  abstract =     "Data is often replicated in distributed database
                 applications to improve availability and response time.
                 Conventional multi-copy algorithms deliver fast
                 response times and high availability for read-only
                 transactions while sacrificing these goals for updates.
                 In this paper, we propose a multi-copy algorithm that
                 works well in both retrieval and update environments by
                 exploiting special application semantics. By
                 subdividing transactions into various categories, and
                 utilizing a commutativity property, we demonstrate
                 cheaper techniques and show that they guarantee
                 correctness. A performance comparison between our
                 techniques and conventional ones quantifies the extent
                 of the savings.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Measurement; Performance;
                 Reliability",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases}; Computer
                 Systems Organization --- Performance of Systems (C.4):
                 {\bf Reliability, availability, and serviceability};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}; Computing Milieux ---
                 Management of Computing and Information Systems ---
                 Installation Management (K.6.2): {\bf Performance and
                 usage measurement}",
}

@InProceedings{Ramakrishna:1988:ABD,
  author =       "M. V. Ramakrishna and P. Mukhopadhyay",
  title =        "Analysis of bounded disorder file organization",
  crossref =     "ACM:1988:PPS",
  pages =        "117--125",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p117-ramakrishna/p117-ramakrishna.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p117-ramakrishna/",
  abstract =     "Recently Litwin and Lomet proposed the Bounded
                 Disorder (BD) file organization which uses a
                 combination of hashing and tree indexing. Lomet provided
                 an approximate analysis with a mention of the
                 difficulty involved in exact modeling and analysis. The
                 performance analysis of the method involves solving a
                 classical sequential occupancy problem. We encountered
                 this problem in our attempt to obtain a general model
                 for single access and almost single access retrieval
                 methods developed in the recent years. In this paper,
                 we develop a probability model and present some
                 preliminary results of the exact analysis.",
  acknowledgement = ack-nhfb,
}

@InProceedings{ElAbbadi:1988:GPC,
  author =       "Amr {El Abbadi} and Sam Toueg",
  title =        "The group paradigm for concurrency control",
  crossref =     "ACM:1988:PAC",
  pages =        "126--134",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p126-el_abbadi/p126-el_abbadi.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p126-el_abbadi/",
  abstract =     "We propose a paradigm for developing, describing and
                 proving the correctness of concurrency control
                 protocols for replicated databases in the presence of
                 failures or communication restrictions. Our approach is
                 to hierarchically divide the problem of achieving
                 one-copy serializability by introducing the notion of a
                 ``group'' that is a higher level of abstraction than
                 transactions. Instead of dealing with the overall
                 problem of serializing all transactions, our paradigm
                 divides the problem into two simpler ones. (1) A {\em
                 local policy\/} for each group that ensures a total
                 order of all transactions in that group. (2) A {\em
                 global policy\/} that ensures a correct serialization
                 of all groups. We use the paradigm to demonstrate the
                 similarities between several concurrency control
                 protocols by comparing the way they achieve
                 correctness.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance; Reliability",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Distributed databases}; Computer Systems Organization
                 --- Performance of Systems (C.4): {\bf Reliability,
                 availability, and serviceability}",
}

@InProceedings{Srivastava:1988:AMM,
  author =       "Jaideep Srivastava and Doron Rotem",
  title =        "Analytical modeling of materialized view maintenance",
  crossref =     "ACM:1988:PPS",
  pages =        "126--134",
  year =         "1988",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p126-srivastava/p126-srivastava.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p126-srivastava/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hadzilacos:1988:SGA,
  author =       "Thanasis Hadzilacos",
  title =        "Serialization graph algorithms for multiversion
                 concurrency control",
  crossref =     "ACM:1988:PPS",
  pages =        "135--141",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p135-hadzilacos/p135-hadzilacos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p135-hadzilacos/",
  abstract =     "{\em We propose a new algorithmic framework for
                 database concurrency control using multiple versions of
                 data items and a serialization graph of the
                 transactions as a synchronization technique, which
                 generalizes all concurrency control methods known so
                 far. This class of algorithms, called MVSGA for Multi
                 Version Serialization Graph set of Algorithms, works by
                 monitoring the acyclicity of the serialization graph
                 which has nodes corresponding to transactions and arcs
                 corresponding to read-from and other transaction
                 positioning decisions made by the scheduler. For each
                 of the major known schedulers we give examples of MVSGA
                 schedulers that cover them}. \par

                 {\em We propose a criterion for optimality among MVSGA
                 schedulers. Choice of versions to read from and relative
                 positioning of transactions in the serialization graph
                 should be done in a way that leaves the largest
                 flexibility possible for future choices. This
                 flexibility is measured as the number of pairs of nodes
                 in the serialization graph that remain incomparable.
                 Unfortunately, enforcing this criterion turns out to be
                 NP-complete, so we describe an MVSGA scheduler based on
                 a heuristic that approximates the optimal}.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Breitbart:1988:MUI,
  author =       "Yuri Breitbart and Avi Silberschatz",
  title =        "Multidatabase update issues",
  crossref =     "ACM:1988:PAC",
  pages =        "135--142",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p135-breitbart/p135-breitbart.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p135-breitbart/",
  abstract =     "A formal model of data updates in a multidatabase
                 environment is developed, and a theory of concurrency
                 control in such an environment is presented. We
                 formulate a correctness condition for the concurrency
                 control mechanism and propose a protocol that allows
                 concurrent execution of a set of global transactions in
                 presence of local ones. This protocol ensures the
                 consistency of the multidatabase and deadlock freedom.
                 We use the developed theory to prove the protocol's
                 correctness and discuss complexity issues of
                 implementing the proposed protocol.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance; Reliability",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Distributed databases}; Information Systems ---
                 Database Management --- Database Administration
                 (H.2.7): {\bf Logging and recovery}",
}

@InProceedings{Kelter:1988:QPD,
  author =       "Udo Kelter",
  title =        "The queue protocol: a deadlock-free, homogeneous,
                 non-two-phase locking protocol",
  crossref =     "ACM:1988:PPS",
  pages =        "142--151",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p142-kelter/p142-kelter.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p142-kelter/",
  abstract =     "The M-pitfall protocol (MPP) is the most general
                 homogeneous non-two-phase locking protocol which
                 supports shared and exclusive locks. It has two major
                 disadvantages: it is not deadlock-free and it has the
                 paradoxical property that concurrency is often reduced
                 if shared locks are used instead of exclusive locks.
                 This paper presents a new protocol, the Queue Protocol
                 (QP), which removes these deficiencies. Although the QP
                 can be regarded an enhancement of the MPP, pitfalls are
                 no more used in the QP; thus, the QP has the further
                 advantage that processing overhead due to pitfalls is
                 avoided.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Abiteboul:1988:DFD,
  author =       "Serge Abiteboul and Richard Hull",
  title =        "Data functions, datalog and negation",
  crossref =     "ACM:1988:PAC",
  pages =        "143--153",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p143-abiteboul/p143-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p143-abiteboul/",
  abstract =     "Datalog is extended to incorporate single-valued
                 ``data functions'', which correspond to attributes in
                 semantic models, and which may be base (user-specified)
                 or derived (computed). Both conventional and stratified
                 datalog are considered. Under the extension, a datalog
                 program may not be consistent, because a derived
                 function symbol may evaluate to something which is not
                 a function. Consistency is shown to be undecidable, and
                 is decidable in a number of restricted cases. A
                 syntactic restriction, {\em panwise consistency}, is
                 shown to guarantee consistency. The framework developed
                 here can also be used to incorporate single-valued data
                 functions into the Complex Object Language (COL), which
                 supports deductive capabilities, complex database
                 objects, and set-valued data functions. \par

                 There is a natural correspondence between the extended
                 datalog introduced here, and the usual datalog with
                 functional dependencies. For families ${\cal F}$ and
                 ${\cal G}$ of dependencies and a family ${\cal P}$ of
                 datalog programs, the ${\cal F}$-${\cal G}$ {\em
                 implication problem\/} for ${\cal P}$ asks, given sets
                 F and G and a program P in ${\cal P}$, whether for all
                 inputs I, I $\models$ F implies P(I) $\models$ G. The
                 FD-FD implication problem is
                 undecidable for datalog, and the TGD-EGD implication
                 problem is decidable for stratified datalog. Also, the
                 $\emptyset$-MVD problem is undecidable (and hence also the
                 MVD-preservation problem).",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Measurement; Performance",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Natural Language Processing (I.2.7): {\bf DATALOG};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 DAPLEX}; Information Systems --- Database Management
                 --- Languages (H.2.3): {\bf Query languages}",
}

@InProceedings{Banciihon:1988:OOD,
  author =       "Fran{\c{c}}ois Bancilhon",
  title =        "Object-oriented database systems",
  crossref =     "ACM:1988:PPS",
  pages =        "152--162",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p152-banciihon/p152-banciihon.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p152-banciihon/",
  abstract =     "This paper describes my vision of the current state of
                 object-oriented database research. I first briefly
                 define this field by its objectives, and relate it to
                 other database subfields. I describe what I consider to
                 be the main characteristics of an object oriented
                 system, i.e., those which are important to integrate in
                 a database system: encapsulation, object identity,
                 classes or types, inheritance, overriding and late
                 binding. I point out the differences between an object
                 oriented system and an object oriented database system.
                 I also point out the advantages and drawbacks of an
                 object oriented database system with respect to a
                 relational system. Finally, I list some research
                 issues.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Krishnamurthy:1988:FTS,
  author =       "Ravi Krishnamurthy and Raghu Ramakrishnan and Oded
                 Shmueli",
  title =        "A framework for testing safety and effective
                 computability of extended datalog",
  crossref =     "ACM:1988:PAC",
  pages =        "154--163",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p154-krishnamurthy/p154-krishnamurthy.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p154-krishnamurthy/",
  abstract =     "This paper presents a methodology for testing a
                 general logic program containing function symbols and
                 built-in predicates for {\em safety\/} and {\em
                 effective computability}. Safety is the property that
                 the set of answers for a given query is finite. A
                 related issue is whether the evaluation strategy can
                 effectively compute all answers and terminate. We
                 consider these problems under the assumption that
                 queries are evaluated using a bottom-up fixpoint
                 computation. We also approximate the use of function
                 symbols by considering Datalog programs with infinite
                 base relations over which {\em finiteness
                 constraints\/} and {\em monotonicity constraints\/} are
                 considered. One of the main results of this paper is a
                 recursive algorithm, {\em check\_clique}, to test the
                 safety and effective computability of predicates in
                 arbitrarily complex cliques. This algorithm takes
                 certain procedures as parameters, and its applicability
                 can be strengthened by making these procedures more
                 sophisticated. We specify the properties required of
                 these procedures precisely, and present a formal proof
                 of correctness for algorithm {\em check\_clique}. This
                 work provides a framework for testing safety and
                 effective computability of recursive programs, and is
                 based on a clique by clique analysis. The results
                 reported here form the basis of the safety testing for
                 the LDL language, being implemented at MCC.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Computing
                 Methodologies --- Artificial Intelligence --- Natural
                 Language Processing (I.2.7): {\bf DATALOG}; Theory of
                 Computation --- Mathematical Logic and Formal Languages
                 --- Mathematical Logic (F.4.1): {\bf Logic and
                 constraint programming}; Computing Methodologies ---
                 Artificial Intelligence --- Deduction and Theorem
                 Proving (I.2.3): {\bf Logic programming}",
}

@InProceedings{Chan:1988:IRD,
  author =       "Edward P. F. Chan and Hector J. Hernandez",
  title =        "Independence-reducible database schemes",
  crossref =     "ACM:1988:PPS",
  pages =        "163--173",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p163-chan/p163-chan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p163-chan/",
  abstract =     "A class of cover embedding database schemes, called
                 independence-reducible, is proposed and is proven to be
                 bounded and algebraic-maintainable, and therefore is
                 highly desirable with respect to query answering and
                 constraint enforcement. This class of schemes is shown
                 to properly contain a superset of all previously known
                 classes of cover embedding BCNF database schemes which
                 are bounded (and constant-time-maintainable). An
                 efficient algorithm is found which recognizes exactly
                 this class of database schemes. Independence-reducible
                 database schemes properly contain a class of
                 constant-time-maintainable database schemes and a
                 condition which characterizes this class of schemes is
                 found; this condition can be tested efficiently.
                 Throughout, it is assumed that a cover of the
                 functional dependencies is embedded in the database
                 scheme in the form of key dependencies.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Chen:1988:IMR,
  author =       "Qiming Chen and Georges Gardarin",
  title =        "An implementation model for reasoning with complex
                 objects",
  crossref =     "ACM:1988:PAC",
  pages =        "164--172",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p164-chen/p164-chen.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p164-chen/",
  abstract =     "In this paper, we first propose a natural syntactical
                 extension of DATALOG called NESTED\_DATALOG for dealing
                 with complex objects represented as nested predicates.
                 Then, we introduce the token object model which is a
                 simple extension of the relational model with tokens to
                 represent complex objects and support referential
                 information sharing. An implementation model of a
                 NESTED\_DATALOG program is defined by mapping it to the
                 token object model which remains a straightforward
                 extension of classical logical databases. Through this
                 work, we can accommodate two basic requirements. The
                 availability of a rule language for reasoning with
                 complex objects, and the mechanism for mapping a
                 complex object rule program to a relational DBMS
                 offering a pure DATALOG rule language. In summary, the
                 main contributions of the paper are the definition of a
                 rule language for complex objects and the development
                 of a technique to compile this complex object rule
                 language to classical DATALOG.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Theory; Verification",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Natural Language Processing (I.2.7): {\bf DATALOG};
                 Computing Methodologies --- Artificial Intelligence ---
                 Deduction and Theorem Proving (I.2.3): {\bf Logic
                 programming}; Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Mathematical Logic
                 (F.4.1): {\bf Logic and constraint programming};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Trees}",
}

@InProceedings{Kim:1988:OFD,
  author =       "Myoung Ho Kim and Sakti Pramanik",
  title =        "Optimal file distribution for partial match
                 retrieval",
  crossref =     "ACM:1988:PAC",
  pages =        "173--182",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p173-kim/p173-kim.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p173-kim/",
  abstract =     "In this paper we present data distribution methods for
                 parallel processing environment. The primary objective
                 is to process partial match retrieval type queries for
                 parallel devices. \par

                 The main contribution of this paper is the development
                 of a new approach called FX (Fieldwise eXclusive)
                 distribution for maximizing data access concurrency. An
                 algebraic property of exclusive-or operation, and field
                 transformation techniques are fundamental to this data
                 distribution techniques. We have shown through theorems
                 and corollaries that this FX distribution approach
                 performs better than other methods proposed earlier. We
                 have also shown, by computing probability of optimal
                 distribution and query response time, that FX
                 distribution gives better performance than others over
                 a large class of partial match queries. This approach
                 presents a new basis in which optimal data distribution
                 for more general type of queries can be formulated.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Computer Systems
                 Organization --- Performance of Systems (C.4): {\bf
                 Performance attributes}; Computer Systems Organization
                 --- Processor Architectures --- Multiple Data Stream
                 Architectures (Multiprocessors) (C.1.2): {\bf Parallel
                 processors**}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Distributed
                 databases}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Query processing}",
}

@InProceedings{Hegner:1988:DRS,
  author =       "Stephen J. Hegner",
  title =        "Decomposition of relational schemata into components
                 defined by both projection and restriction",
  crossref =     "ACM:1988:PPS",
  pages =        "174--183",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p174-hegner/p174-hegner.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p174-hegner/",
  abstract =     "A generalized approach to the decomposition of
                 relational schemata is developed in which the component
                 views may be defined using both restriction and
                 projection operators, thus admitting both horizontal
                 and vertical decompositions. The realization of
                 restrictions is enabled through the use of a Boolean
                 algebra of types, while true independence of
                 projections is modelled by permitting null values in
                 the base schema. The flavor of the approach is
                 algebraic, with the collection of all candidate views
                 of a decomposition modelled within a lattice-like
                 framework, and the actual decompositions arising as
                 Boolean subalgebraic. Central to the framework is the
                 notion of {\em sidimensional join dependency}, which
                 generalizes the classical notion of join dependency by
                 allowing the components of the join to be selected
                 horizontally as well as vertically. Several properties
                 of such dependencies are presented, including a
                 generalization of many of the classical results known
                 to be equivalent to schema acyclicity. Finally, a
                 characterization of the nature of dependencies which
                 participate in decompositions is presented. It is shown
                 that there are two major types, the bidimensional join
                 dependencies, which are tuple generating and allow
                 tuple removal by implicit encoding of knowledge, and
                 splitting dependencies, which simply partition the
                 database into two components.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hutflesz:1988:TGF,
  author =       "Andreas Hutflesz and Hans-Werner Six and Peter
                 Widmayer",
  title =        "Twin grid files: space optimizing access schemes",
  crossref =     "ACM:1988:PAC",
  pages =        "183--190",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p183-hutflesz/p183-hutflesz.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p183-hutflesz/",
  abstract =     "Storage access schemes for points, supporting spatial
                 searching, usually suffer from an undesirably low
                 storage space utilization. We show how a given set of
                 points can be distributed among two grid files in such
                 a way that storage space utilization is optimal. The
                 optimal twin grid file can be built practically as fast
                 as a standard grid file, i.e., the storage space
                 optimality is obtained at almost no extra cost. We
                 compare the performances of the standard grid file, the
                 optimal static twin grid file, and an efficient dynamic
                 twin grid file, where insertions and deletions trigger
                 the redistribution of points among the two grid
                 files. Twin grid files utilize storage space at roughly
                 90\%, as compared with the 69\% of the standard grid
                 file. Typical range queries --- the most important
                 spatial search operations --- can be answered in twin
                 grid files at least as fast as in the standard grid
                 file.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Local and Wide-Area
                 Networks (C.2.5): {\bf Access schemes}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Search
                 process}; Information Systems --- Information Storage
                 and Retrieval --- Information Storage (H.3.2): {\bf
                 File organization}; Data --- Files (E.5): {\bf
                 Optimization**}; Data --- Files (E.5): {\bf
                 Organization/structure}",
}

@InProceedings{Batory:1988:CDS,
  author =       "D. S. Batory",
  title =        "Concepts for a database system compiler",
  crossref =     "ACM:1988:PPS",
  pages =        "184--192",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p184-batory/p184-batory.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p184-batory/",
  abstract =     "We propose a very simple formalism based on
                 parameterized types and a rule-based algebra to explain
                 the storage structures and algorithms of database
                 management systems. Implementations of DBMSs are
                 expressed as equations. If all functions referenced in
                 the equations have been implemented the software for a
                 DBMS can be synthesized in minutes at little cost, in
                 contrast to current methods where man-years of effort
                 and hundreds of thousands of dollars are required. Our
                 research aims to develop a DBMS counterpart to today's
                 compiler-compiler technologies.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ramakrishna:1988:HPA,
  author =       "M. V. Ramakrishna",
  title =        "Hashing practice: analysis of hashing and universal
                 hashing",
  crossref =     "ACM:1988:PAC",
  pages =        "191--199",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p191-ramakrishna/p191-ramakrishna.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p191-ramakrishna/",
  abstract =     "Much of the literature on hashing deals with overflow
                 handling (collision resolution) techniques and its
                 analysis. What does all the analytical results mean in
                 practice and how can they be achieved with practical
                 files? This paper considers the problem of achieving
                 analytical performance of hashing techniques in
                 practice with reference to successful search lengths,
                 unsuccessful search lengths and the expected worst case
                 performance (expected length of the longest probe
                 sequence). There has been no previous attempt to
                 explicitly link the analytical results to performance
                 of real life files. Also, the previously reported
                 experimental results deal mostly with successful search
                 lengths. We show why the well known division method
                 performs ``well'' under a specific model of selecting
                 the test file. We formulate and justify an hypothesis
                 that by choosing functions from a particular class of
                 hashing functions, the analytical performance can be
                 obtained in practice on real life files. Experimental
                 results presented strongly support our hypothesis.
                 Several interesting problems arising are mentioned in
                 conclusion.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Theory",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Data --- Data Storage Representations
                 (E.2): {\bf Hash-table representations}",
}

@InProceedings{Hadzilacos:1988:TSO,
  author =       "Thanasis Hadzilacos and Vassos Hadzilacos",
  title =        "Transaction synchronisation in object bases",
  crossref =     "ACM:1988:PPS",
  pages =        "193--200",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p193-hadzilacos/p193-hadzilacos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p193-hadzilacos/",
  abstract =     "In this paper we investigate the problem of
                 synchronising transactions in an object base. An object
                 base is a collection of objects, much the way a
                 database is a collection of data. An object, for our
                 purposes, consists of a collection of variables (whose
                 values at any point in time comprise the state of that
                 object) and a set of operations, called methods, that
                 are the only means of accessing (sensing or modifying)
                 the object's variables \par

                 There is a certain sense in which a traditional
                 database is an object base. It consists of ``objects''
                 (records, tuples or what have you) each of which has a
                 state that can be accessed only through the operations
                 Read and Write. The main difference is that in an
                 object base, each object supplies its own methods and
                 these are arbitrary. In particular, a method for a
                 certain object may call methods of other objects to
                 carry out its task. In contrast to certain models in
                 which objects correspond to ``levels of abstraction'',
                 our model is completely general in this respect for
                 example, it is permissible for a method of object {$A$}
                 to call a method of object {$B$} which, in turn, may
                 call some other method of object {$A$} again \par

                 One implication of this difference between data and
                 object bases is that in the latter the assumption,
                 commonly made in the former, that the operations which
                 manipulate the state of the objects are short enough to
                 be implemented serially (one at a time) is no longer
                 valid. A related implication is that in object bases we
                 are faced with the necessity of dealing with nested
                 transactions, since the invocation of one method may
                 result in further method invocations \par

                 Another, less fundamental, difference between data and
                 object bases is that, in addition to being of uniform
                 type, the ``objects'' of a database are usually assumed
                 to be of uniform size as well. In an object base one
                 can imagine objects of widely differing sizes. A clock
                 and the New York City telephone directory could be
                 objects differing in size by orders of magnitude, yet
                 co-existing in the same object base \par

                 In spite of these differences it is possible to
                 approach concurrency control in an object base in the
                 following way. Each object is viewed as a database
                 item. Further, each method invocation is treated as a
                 group of Read or Write operations on those data items
                 that were accessed as a result of that method
                 invocation. With these analogies, any conventional
                 database concurrency control method (two-phase locking,
                 timestamp ordering, certification, and the whole lot)
                 can be employed to synchronise concurrent transactions
                 in the object base. This approach has the virtue of
                 simplicity and may be well-suited to certain
                 environments. It is, for example, the approach taken in
                 the GemStone project and product (cf Maier and Stein
                 [1987], Purdy {\em et al\/} [1987]) \par

                 We are interested in exploring approaches to
                 concurrency control in object bases which take into
                 account their special features and differences from
                 databases. The hope is that this will lead to more
                 efficient techniques. More specifically, we would like
                 to consider mechanisms that \par

                 Take into account the nested nature of transactions
                 \par

                 Allow methods accessing an object to execute
                 concurrently (but correctly). This seems especially
                 important as multiprocessors become available, since
                 forcing serial access to an object's methods restricts
                 parallelism (bear in mind that each method could be a
                 lengthy procedure) \par

                 Are modular, in that each object is responsible for
                 synchronizing the invocations of its own methods as it
                 sees fit \par

                 The first two of these points have been considered by
                 others as well. For example, Argus (cf Liskov and
                 Scheifler [1983]) uses a synchronisation algorithm
                 which is an adaptation of strict two-phase locking in a
                 nested transaction environment. In addition, Argus
                 allows multiple concurrent invo",
  acknowledgement = ack-nhfb,
}

@InProceedings{Ioannidis:1988:DMD,
  author =       "Yannis E. Ioannidis and Miron Livny",
  title =        "Data modeling in {DELAB}",
  crossref =     "ACM:1988:PAC",
  pages =        "200--200",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p200-ioannidis/p200-ioannidis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p200-ioannidis/",
  abstract =     "As the size and complexity of processing and
                 manufacturing systems increases, the need for Database
                 Management Systems (DBMS) that meet the special needs
                 of studies that experiment with such systems becomes
                 more current. System analysts who study the performance
                 of modern processing systems have to manipulate large
                 amounts of data in order to profile the behavior of the
                 system. They have to identify the relationship between
                 the properties of a compound system and a wide spectrum
                 of performance metrics. In a recent study in which we
                 have analyzed a set of distributed concurrency control
                 algorithms, we performed more than 1400 simulation
                 experiments. Each experiment was characterized by more
                 than 6000 input parameters and generated more than 400
                 output values. It is thus clear that powerful means for
                 defining the structure and properties of complex
                 systems are needed, as well as efficient tools to
                 retrieve the data accumulated in the course of the
                 study. We are currently engaged in an effort to develop
                 and implement the DE {\em LAB simulation laboratory\/}
                 that aims to provide such means and tools for
                 simulation studies. \par

                 The goal of the first phase of this effort was to
                 design and implement a simulation language. It ended in
                 1986 when the DE {\em NET\/} (Discrete Event NETwork)
                 simulation language became operational. The language is
                 based on the concept of Discrete Event System
                 Specifications (DEVS). It views the simulator as a
                 collection of self contained objects that communicate
                 via Discrete Event Connectors that provide a unified
                 synchronization protocol. In the past two years the
                 language has been used in a number of real life
                 studies. It was used to simulate distributed processing
                 environments, communication protocols, and production
                 lines. Several tools have been developed around the
                 language. All tools adhere to the same modeling
                 methodology and thus create a cohesive simulation
                 environment. \par

                 In the second phase of the DE {\em LAB\/} project we
                 have been addressing the data management problem. DE
                 {\em NET\/} has been interfaced to a special purpose
                 relational DBMS that can store descriptions of
                 simulation runs and provides access to the stored data.
                 Based on our experience with this DBMS, we have reached
                 the conclusion that system analysts need to be provided
                 with a view of the data that differs from the way the
                 DE {\em NET\/} program views the data, and thus decided
                 to develop a data model that meets their needs. The
                 MOOSE data model, which is the result of this effort,
                 has an {\em object oriented\/} flavor. It was developed
                 with the guidance of potential users and was tested on
                 a number of real life simulation studies. \par

                 Although the conception of MOOSE was motivated by the
                 specific needs of a simulation laboratory, we believe
                 that it addresses the representational needs of many
                 other environments. We have decided to support the
                 notion of an {\em object}. Every object is assigned a
                 unique identifier. Depending on their properties
                 (attributes), objects can simultaneously belong to
                 several {\em classes}, inheriting properties from all
                 of them. Among these classes, one is characterized as
                 the {\em primary\/} class of the object. The notion of
                 a primary class helps achieving a ``conceptual'' as
                 well as a physical clustering among similar objects.
                 Collections of objects are supported as regular objects
                 in MOOSE in the form of sets, multisets (bags), and
                 arrays. The {\em extent\/} of a class, i.e., the
                 objects that are known members of the class, is
                 explicitly stored in the database. Every MOOSE
                 database schema has a straightforward directed graph
                 representation. Each node represents a class of objects
                 and is labeled by the class name. Relationships between
                 the classes in the schema are captured by the arcs of
                 the graph. Similarly to most object-oriented data
                 models, MOOSE has two major types of arcs: {\em
                 component arcs\/} and {\em inheritance arcs}\ldots{}
                 \par

                 ",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Performance",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Computing
                 Methodologies --- Simulation and Modeling ---
                 Simulation Languages (I.6.2); Theory of Computation ---
                 Mathematical Logic and Formal Languages --- Formal
                 Languages (F.4.3): {\bf Classes defined by grammars or
                 automata}; Information Systems --- Database Management
                 --- Languages (H.2.3): {\bf Query languages}",
}

@InProceedings{Ono:1988:DMT,
  author =       "Kiyoshi Ono and Mikio Aoyama and Hiroshi Fujimoto",
  title =        "Data management of telecommunications networks",
  crossref =     "ACM:1988:PAC",
  pages =        "201--201",
  year =         "1988",
  bibdate =      "Wed Oct 25 08:47:40 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p201-ono/p201-ono.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p201-ono/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Herlihy:1988:HCC,
  author =       "Maurice P. Herlihy and William E. Weihl",
  title =        "Hybrid concurrency control for abstract data types",
  crossref =     "ACM:1988:PPS",
  pages =        "201--210",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p201-herlihy/p201-herlihy.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p201-herlihy/",
  abstract =     "We define a new locking protocol that permits more
                 concurrency than existing commutativity-based
                 protocols. The protocol uses timestamps generated when
                 transactions commit to provide more information about
                 the serialization order of transactions, and hence to
                 weaken the constraints on conflicts. In addition, the
                 protocol permits operations to be both partial and
                 non-deterministic, and it permits results of operations
                 to be used in choosing locks. The protocol exploits
                 type-specific properties of objects, necessary and
                 sufficient constraints on lock conflicts are defined
                 directly from a data type specification. We give a
                 complete formal description of the protocol,
                 encompassing both concurrency control and recovery, and
                 prove that the protocol satisfies {\em hybrid
                 atomicity}, a local atomicity property that combines
                 aspects of static and dynamic atomic protocols. We also
                 show that the protocol is optimal in the sense that no
                 hybrid atomic locking scheme can permit more
                 concurrency.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Alho:1988:DDM,
  author =       "Kari Alho and Hannu Peltonen and Martti
                 M{\"a}ntyl{\"a} and Reijo Sulonen",
  title =        "A design data manager",
  crossref =     "ACM:1988:PAC",
  pages =        "202--202",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p202-alho/p202-alho.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p202-alho/",
  abstract =     "{\em HutBase\/} is a visual design data manager that
                 can be used to store and manipulate data objects
                 created and processed by a variety of design
                 applications. In particular, HutBase allows the user to
                 manipulate the data and start applications, and
                 provides a access mechanism for the applications.
                 \par

                 HutBase consists of three software layers. The lowest
                 layer, the {\em Object Management System\/} (OMS), is
                 based on the Entity-Relationship model and includes
                 those basic operations related to the storage and
                 access of design data objects that are common to all
                 applications. The database is divided into {\em
                 workspaces}, which are collections of OMS {\em
                 objects\/} and {\em relationships\/} organized
                 according to an application-dependent schema and
                 forming a significant whole (e.g., a design project)
                 from the user's point of view. Workspace is also the
                 unit for locking and access control. \par

                 An object is a collection of {\em attributes}. Each
                 attribute has a name and value. The name is a string
                 and the value is an arbitrary sequence of bytes. The
                 value of an attribute can be of any length, from a
                 single integer to an external representation of a
                 complicated geometric model. A relationship is a named
                 directed connection between two objects. Relationships
                 have attributes like objects. \par

                 The OMS library contains functions for creating,
                 opening and removing workspaces, objects, relationships
                 and attributes. All operations are carried out within
                 {\em transactions}. The functions do not change the
                 permanent data on the disk until the user calls the
                 {\em save\_changes\/} function, which saves the current
                 state of all workspaces opened in a given transaction.
                 \par

                 The next layer is a prototype data model built on top
                 of OMS, which stores the objects in each workspace as a
                 hierarchical tree by means of relationships. The leaves
                 of the hierarchy are called {\em representations\/} and
                 contain the actual data manipulated by the
                 applications. Each representation is associated with a
                 {\em representation type}, which in turn are linked to
                 the application programs, or {\em tools}. The
                 representation types and tools are stored as objects in
                 a separate workspace. \par

                 The top level contains a user interface and a
                 procedural application interface. The user interface
                 shows the available representation types, tools, and
                 contents of one or more workspaces in iconic form. A
                 representation can be opened by selecting its icon on
                 the screen. The tool corresponding to the type of the
                 representation is then started with a handle to the
                 representation as argument. The interface also allows
                 the user to create, remove and copy objects. \par

                 The tool programs run as subprocesses of the HutBase
                 process. Tools access the data base by remote procedure
                 calls that send data base requests from the tool
                 process to the HutBase process. The tools can also
                 create relationships between representations and
                 navigate in the workspace by following the relationship
                 links. \par

                 We are currently working on a interpreted definition
                 language that can be used to describe the structure of
                 a workspace. The definition language will be based on
                 an object-oriented notation, where object and relation
                 types form a class hierarchy. Class descriptions
                 include (possibly inherited) methods for dealing with
                 the various HutBase operations. With the contemplated
                 description facility, new object and relationship types
                 can be defined by declaring new subclasses of the
                 existing ones.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Computing Methodologies --- Computer Graphics ---
                 Methodology and Techniques (I.3.6): {\bf Interaction
                 techniques}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}",
}

@InProceedings{Naeymi-Rad:1988:RDD,
  author =       "Frank Naeymi-Rad and Lowell Carmony and David Trace
                 and Christine Georgakis and Max Harry Weil",
  title =        "A relational database design in support of standard
                 medical terminology in multi-domain knowledge bases",
  crossref =     "ACM:1988:PAC",
  pages =        "203--203",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p203-naeymi-rad/p203-naeymi-rad.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p203-naeymi-rad/",
  abstract =     "Relational database techniques have been used to
                 create knowledge bases for a medical diagnostic
                 consultant system. Known as MEDAS (Medical Emergency
                 Decision Assistance System), this expert system, using
                 disorder patterns consisting of features such as
                 symptoms and laboratory results, is able to diagnose
                 multiple disorders. Database technology has been used
                 in MEDAS to develop knowledge engineering tools, called
                 the TOOL BOX, which permit domain experts to create
                 knowledge without the assistance of a knowledge
                 engineer. \par

                 In the process of knowledge development with the TOOL
                 BOX a standardization of terms was needed. This led us
                 to design a Feature Dictionary and a grammar to support
                 a standardized format for features. A common dictionary
                 of features will allow us to merge knowledge bases,
                 translate between multi-domain bases, and compare
                 competing expert systems. In addition, standard
                 terminology will assist communication across domains
                 \par

                 The Feature Dictionary has the following attributes:
                 {\em Long\/} forms of the feature name (White Blood
                 Count) and {\em short\/} forms (WBC) as well as a three
                 line description of the feature. The {\em type}, binary
                 (Abdominal Pain), continuous-valued (WBC), or derived
                 (pulse pressure = systolic - diastolic) is also kept
                 for each feature \par

                 For value features the appropriate {\em unit\/} (cc,
                 kg, etc.) as well as {\em range\/} limits are stored so
                 that these can be used as a form of quality control on
                 input. The {\em permanence\/} (Y/N) of each feature is
                 kept so it is possible to automatically include
                 permanent features in future encounters. In addition,
                 for each feature three separate ``{\em cost\/}''
                 parameters are kept. {\em Risk\/} measures the danger
                 to the patient from no risk such as taking a blood
                 pressure to highly invasive proceedings such as a liver
                 biopsy. {\em Time\/} measures whether results can be
                 expected in minutes, hours, or days. {\em Money\/}
                 measures the actual cost to the patient. FD-Equivalents
                 stores the synonyms and antonyms of each feature. These
                 are used to translate between knowledge bases using
                 different terminology. \par

                 Features were first classified in terms of a Problem
                 Oriented Medical Record. We have added an anatomical
                 reclassification in terms of body systems. Experts will
                 be able to add new kinds of feature classifications.
                 \par

                 MEDAS, a multi-membership Bayesian model, needs binary
                 representations for its inference. These Binary
                 Features are created by the expert physician in the
                 given disorder patterns. For example, ``WBC 50,000'',
                 or ``Age 2 Female Hematocrit 42'' are binary features
                 that might appear in a disorder pattern. Laboratory
                 results often lead to a multiplicity of binary features
                 (such as ``WBC 3,000'', or 3,000 WBC 10,000, etc.). Our
                 design allows the user to enter the value of such a
                 feature and have the system set of all the
                 corresponding binary features. This intelligent user
                 interface is controlled by a grammar that allows us to
                 parse the binary features and generate rules for them.
                 \par

                 The knowledge base for a particular problem domain such
                 as OB/GYN is organized as a collection of disorder
                 patterns. Each of these is represented as a list of
                 binary features and associated probabilities. The
                 domain knowledge base contains only the features
                 relevant to that domain. \par

                 Experience with the Feature Dictionary has convinced us
                 that there are many advantages in using a DBMS to store
                 the knowledge base for an expert system. The TOOL BOX,
                 originally in ACCENT-R, was rewritten in dBase III for
                 the PC. The knowledge bases created on the PC were then
                 ported to the mainframe. As the number of domains
                 supported by MEDAS grew, it became evident that we
                 needed a DBMS that could function in both environments
                 so we are in the process of converting to ORACLE.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Human Factors",
  subject =      "Computer Applications --- Life and Medical Sciences
                 (J.3): {\bf Medical information systems}; Computing
                 Methodologies --- Artificial Intelligence ---
                 Applications and Expert Systems (I.2.1); Information
                 Systems --- Information Storage and Retrieval ---
                 Content Analysis and Indexing (H.3.1): {\bf
                 Dictionaries}; Information Systems --- Database
                 Management --- Systems (H.2.4)",
}

@InProceedings{Hernandez:1988:CCT,
  author =       "H{\'e}ctor J. Hern{\'a}ndez and Edward P. F. Chan",
  title =        "A characterization of constant-time maintainability
                 for {BCNF} database schemes",
  crossref =     "ACM:1988:PAC",
  pages =        "209--217",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p209-hernandez/p209-hernandez.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p209-hernandez/",
  abstract =     "The {\em maintenance problem\/} (for database states)
                 of a database scheme R with respect to a set of
                 functional dependencies {$F$} is the following decision
                 problem. Let r be a consistent state of R with respect
                 to {$F$} and assume we insert a tuple $t$ into {$r_p
                 \in r$}. Is {$r \cup t$} a consistent state of R
                 with respect to {$F$}? R is said to be {\em
                 constant-time-maintainable\/} with respect to {$F$} if
                 there is an algorithm that solves the maintenance
                 problem of R with respect to {$F$} in time independent
                 of the state size. \par

                 A characterization of constant-time-maintainability for
                 the class of BCNF database schemes is given. An
                 efficient algorithm that tests this characterization is
                 shown, as well as an algorithm for solving the
                 maintenance problem in time independent of the state
                 size. It is also proven that constant-time-maintainable
                 BCNF database schemes are bounded. In particular, it is
                 shown that total projections of the representative
                 instance can be computed via unions of projections of
                 extension joins. Throughout we assume that database
                 schemes are cover embedding and BCNF, and that
                 functional dependencies are given in the form of key
                 dependencies.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Software --- Operating Systems --- File Systems
                 Management (D.4.3): {\bf Maintenance**}; Theory of
                 Computation --- Mathematical Logic and Formal Languages
                 --- Formal Languages (F.4.3): {\bf Classes defined by
                 grammars or automata}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Normal
                 forms}; Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Query formulation}",
}

@InProceedings{Lanin:1988:CSM,
  author =       "Vladimir Lanin and Dennis Shasha",
  title =        "Concurrent set manipulation without locking",
  crossref =     "ACM:1988:PPS",
  pages =        "211--220",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p211-lanin/p211-lanin.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p211-lanin/",
  abstract =     "Set manipulation consists of the actions {\em insert,
                 delete}, and {\em member\/} on keys. We propose a
                 concurrent set manipulation algorithm that uses no
                 locking at all and requires no aborts, relying instead
                 on atomic read-modify-write operations on single (data)
                 locations. The algorithm satisfies order-preserving
                 serializability through conditions that are strictly
                 looser than existing algorithms.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Leuchner:1988:PTA,
  author =       "J. Leuchner and L. Miller and G. Slutzki",
  title =        "A polynomial time algorithm for testing implications
                 of a join dependency and embodied functional
                 dependencies",
  crossref =     "ACM:1988:PAC",
  pages =        "218--224",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p218-leuchner/p218-leuchner.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p218-leuchner/",
  abstract =     "The problem of deciding whether a full join dependency
                 (JD) [ {$R$} ] and a set of functional dependencies
                 (FDs) {$F$} imply an embedded join dependency (EJD) [
                 {$S$} ] is known to be NP-complete. We show that the
                 problem can be decided in polynomial time if {$S
                 \subseteq R$} and {$F$} is embedded in {$R$}. Our work uses
                 arguments based on an extension of complete
                 intersection graphs rather than tableaus. This approach
                 has facilitated our results and should prove useful for
                 future research.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Theory",
  subject =      "Theory of Computation --- Analysis of Algorithms and
                 Problem Complexity --- Numerical Algorithms and
                 Problems (F.2.1): {\bf Computations on polynomials};
                 Theory of Computation --- Mathematical Logic and Formal
                 Languages --- Formal Languages (F.4.3): {\bf Classes
                 defined by grammars or automata}; Information Systems
                 --- Database Management --- Logical Design (H.2.1):
                 {\bf Schema and subschema}",
}

@InProceedings{VanGelder:1988:USW,
  author =       "Allen {Van Gelder} and Kenneth Ross and John S.
                 Schlipf",
  title =        "Unfounded sets and well-founded semantics for general
                 logic programs",
  crossref =     "ACM:1988:PPS",
  pages =        "221--230",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p221-van_gelder/p221-van_gelder.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p221-van_gelder/",
  abstract =     "A general logic program (abbreviated to ``program''
                 hereafter) is a set of rules that have both positive
                 and negative subgoals. It is common to view a deductive
                 database as a general logic program consisting of rules
                 (IDB) sitting above elementary relations (EDB, facts).
                 It is desirable to associate one Herbrand model with a
                 program and think of that model as the ``meaning of the
                 program,'' or its ``declarative semantics.'' Ideally,
                 queries directed to the program would be answered in
                 accordance with this model. We introduce {\em unfounded
                 sets\/} and {\em well-founded partial models}, and
                 define the well-founded semantics of a program to be
                 its well-founded partial model. If the well-founded
                 partial model is in fact a model, we call it the {\em
                 well-founded\/} model, and say the program is
                 ``well-behaved''. We show that the class of
                 well-behaved programs properly includes previously
                 studied classes of ``stratified'' and ``locally
                 stratified'' programs. Gelfond and Lifschitz have
                 proposed a definition of ``unique stable model'' for
                 general logic programs. We show that a program has a
                 unique stable model if it has a well-founded model, in
                 which case they are the same. We discuss why the
                 converse is not true.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Gyssens:1988:PAR,
  author =       "Marc Gyssens and Dirk van Gucht",
  title =        "The powerset algebra as a result of adding programming
                 constructs to the nested relational algebra",
  crossref =     "ACM:1988:PAC",
  pages =        "225--232",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p225-gyssens/p225-gyssens.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p225-gyssens/",
  abstract =     "In this paper, we discuss augmentations of the nested
                 relational algebra with programming constructs, such as
                 while-loops and for-loops. We show that the algebras
                 obtained in this way are equivalent to a slight
                 extension of the powerset algebra, thus emphasizing
                 both the strength and the naturalness of the powerset
                 algebra as a tool to manipulate nested relations, and,
                 at the same time, indicating more direct ways to
                 implement this algebra.",
  acknowledgement = ack-nhfb,
  generalterms = "Languages; Theory",
  subject =      "Theory of Computation --- Logics and Meanings of
                 Programs --- Studies of Program Constructs (F.3.3);
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}",
}

@InProceedings{Kolaitis:1988:WNF,
  author =       "Phokion G. Kolaitis and Christos H. Papadimitriou",
  title =        "Why not negation by fixpoint?",
  crossref =     "ACM:1988:PPS",
  pages =        "231--239",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p231-kolaitis/p231-kolaitis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p231-kolaitis/",
  abstract =     "{\em There is a fixpoint semantics for DATALOG
                 programs with negation that is a natural generalization
                 of the standard semantics for DATALOG programs without
                 negation. We show that, unfortunately, several
                 compelling complexity-theoretic obstacles rule out its
                 efficient implementation. As an alternative, we propose
                 Inflationary DATALOG, an efficiently implementable
                 semantics for negation, based on inflationary
                 fixpoints\/}.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Mazumdar:1988:RTB,
  author =       "Subhasish Mazumdar and David Stemple and Tim Sheard",
  title =        "Resolving the tension between integrity and security
                 using a theorem prover",
  crossref =     "ACM:1988:PAC",
  pages =        "233--242",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p233-mazumdar/p233-mazumdar.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p233-mazumdar/",
  abstract =     "Some information in databases and knowledge bases
                 often needs to be protected from disclosure to certain
                 users. Traditional solutions involving multi-level
                 mechanisms are threatened by the user's ability to
                 infer higher level information from the semantics of
                 the application. We concentrate on the revelation of
                 secrets through a user running transactions in the
                 presence of database integrity constraints. We develop
                 a method of specifying secrets formally that not only
                 exposes a useful structure and equivalence among
                 secrets but also allows a theorem prover to detect
                 certain security lapses during transaction compilation
                 time.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Security; Verification",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0): {\bf Security, integrity, and
                 protection**}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Transaction
                 processing}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}",
}

@InProceedings{Abiteboul:1988:PDD,
  author =       "Serge Abiteboul and Victor Vianu",
  title =        "Procedural and declarative database update languages",
  crossref =     "ACM:1988:PPS",
  pages =        "240--250",
  year =         "1988",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p240-abiteboul/p240-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p240-abiteboul/",
  acknowledgement = ack-nhfb,
}

@InProceedings{Qian:1988:TLD,
  author =       "Xiaolei Qian and Richard Waldinger",
  title =        "A transaction logic for database specification",
  crossref =     "ACM:1988:PAC",
  pages =        "243--250",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p243-qian/p243-qian.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p243-qian/",
  abstract =     "We introduce a logical formalism for the specification
                 of the dynamic behavior of databases. The evolution of
                 databases is characterized by both the dynamic
                 integrity constraints which describe the properties of
                 state transitions and the transactions whose executions
                 lead to state transitions. Our formalism is based on a
                 variant of first-order situational logic in which the
                 states of computations are explicit objects. Integrity
                 constraints and transactions are uniformly specifiable
                 as expressions in our language. We also point out the
                 application of the formalism to the verification and
                 synthesis of transactions.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Verification",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3)",
}

@InProceedings{Gadia:1988:GMR,
  author =       "Shashi K. Gadia and Chuen-Sing Yeung",
  title =        "A generalized model for a relational temporal
                 database",
  crossref =     "ACM:1988:PAC",
  pages =        "251--259",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p251-gadia/p251-gadia.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p251-gadia/",
  abstract =     "We propose a generalized relational model for a
                 temporal database which allows time stamping with
                 respect to a Boolean algebra of multidimensional time
                 stamps. The interplay between the various temporal
                 dimensions is symmetric. As an application, a two
                 dimensional model which allows objects with real world
                 and transaction oriented time stamps is discussed. The
                 two dimensional model can be used to query the past
                 states of the database. It can also be used to give a
                 precise classification of the errors and updates in a
                 database, and is a promising approach for querying
                 these errors and updates.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Computing Methodologies --- Simulation and Modeling
                 --- Applications (I.6.3); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}",
}

@InProceedings{Naqvi:1988:DUL,
  author =       "Shamim Naqvi and Ravi Krishnamurthy",
  title =        "Database updates in logic programming",
  crossref =     "ACM:1988:PPS",
  pages =        "251--262",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p251-naqvi/p251-naqvi.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p251-naqvi/",
  abstract =     "{\em The need for control in logic programs is now
                 being recognized. This is particularly evident when one
                 focuses on allowing updates in logic programs. In this
                 paper we propose a language DatalogA which is an
                 extension of Datalog with updates to base relations. We
                 define some procedural constructs to allow update
                 programs to be written in an easy manner. The (W,p)
                 scheme of Dynamic Logic fits nicely into the semantics
                 of DatalogA programs in which W is taken to be the set
                 of all possible states of the program and p is the
                 accessibility relation between states. We give
                 declarative semantics and equivalent constructed model
                 semantics for DatalogA programs. We show that in the
                 absence of updates our semantics reduce to the
                 classical semantics of Datalog. Finally, we show some
                 examples of non-stratified programs expressed in
                 DatalogA}.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Peinl:1988:HCS,
  author =       "Peter Peinl and Andreas Reuter and Harald Sammer",
  title =        "High contention in a stock trading database: a case
                 study",
  crossref =     "ACM:1988:PAC",
  pages =        "260--268",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p260-peinl/p260-peinl.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p260-peinl/",
  abstract =     "Though in general, current database systems adequately
                 support application development and operation for
                 online transaction processing (OLTP), increasing
                 complexity of applications and throughput requirements
                 reveal a number of weaknesses with respect to the data
                 model and implementation techniques used. By presenting
                 the experiences gained from a case study of a large,
                 high volume stock trading system, representative for a
                 broad class of OLTP applications, it is shown that
                 this particularly holds for dealing with high frequency
                 access to a small number of data elements (hot spots).
                 As a result, we propose extended data types and several
                 novel mechanisms, which are easy to use and highly
                 increase the expressional power of transaction oriented
                 programming, that effectively cope with hot spots.
                 Moreover, their usefulness and their ability to
                 increase parallelism is exemplified by the stock
                 trading application.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Management",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Computer
                 Applications --- Administrative Data Processing (J.1):
                 {\bf Financial}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Transaction
                 processing}; Computing Methodologies --- Simulation and
                 Modeling --- Applications (I.6.3); Computing Milieux
                 --- Management of Computing and Information Systems ---
                 Project and People Management (K.6.1): {\bf Systems
                 analysis and design}; Computing Methodologies ---
                 Simulation and Modeling --- Model Validation and
                 Analysis (I.6.4)",
}

@InProceedings{Muralikrishna:1988:OMR,
  author =       "M. Muralikrishna and David J. DeWitt",
  title =        "Optimization of multiple-relation multiple-disjunct
                 queries",
  crossref =     "ACM:1988:PPS",
  pages =        "263--275",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p263-muralikrishna/p263-muralikrishna.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p263-muralikrishna/",
  abstract =     "In this paper we discuss the optimization of
                 multiple-relation multiple-disjunct queries in a
                 relational database system. Since optimization
                 techniques for conjunctive (single disjunct) queries in
                 relational databases are well known [Smith75, Wong76,
                 Selinger79, Yao79, Youssefi79], the natural way to
                 evaluate a multiple-disjunct query was to execute each
                 disjunct independently [Bernstein81, Kerschberg82].
                 However, evaluating each disjunct independently may be
                 very inefficient. In this paper, we develop methods
                 that merge two or more disjuncts to form a term. The
                 advantage of merging disjuncts to form terms lies in
                 the fact that each term can be evaluated with a single
                 scan of each relation that is present in the term. In
                 addition, the number of times a join is performed will
                 also be reduced when two or more disjuncts are merged.
                 The criteria for merging a set of disjuncts will be
                 presented. As we will see, the number of times each
                 relation in the query is scanned will be equal to the
                 number of terms. Thus, minimizing the number of terms
                 will minimize the number of scans for each relation. We
                 will formulate the problem of minimizing the number of
                 scans as one of covering a merge graph by a minimum
                 number of complete merge graphs which are a restricted
                 class of Cartesian product graphs. In general, the
                 problem of minimizing the number of scans is
                 NP-complete. We present polynomial time algorithms for
                 special classes of merge graphs. We also present a
                 heuristic for general merge graphs. \par

                 Throughout this paper, we will assume that no relations
                 have any indices on them and that we are only concerned
                 with reducing the number of scans for each relation
                 present in the query. What about relations that have
                 indices on them? It turns out that our performance
                 metric of reducing the number of scans is beneficial
                 even in the case that there are indices. In
                 [Muralikrishna88] we demonstrate that when optimizing
                 single-relation multiple-disjunct queries, the cost
                 (measured in terms of disk accesses) may be reduced if
                 all the disjuncts are optimized together rather than
                 individually. Thus, our algorithm for minimizing the
                 number of terms is also very beneficial in cases where
                 indices exist.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Haynie:1988:DLD,
  author =       "M. Haynie",
  title =        "A {DBMS} for large design automation databases",
  crossref =     "ACM:1988:PAC",
  pages =        "269--276",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p269-haynie/p269-haynie.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p269-haynie/",
  abstract =     "Large capacity Design Automation (CAD/CAM) database
                 management systems require special capabilities over
                 and above what commercial DBMSs or small
                 workstation-based CAD/CAM systems provide. This paper
                 describes one such system, Tacoma, used at Amdahl
                 Corporation for the storage and retrieval of LSI and
                 VLSI mainframe computer designs. Tacoma is based on the
                 relational model with additional object-oriented
                 database features.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf SQL}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Distributed databases}; Computer Applications ---
                 Computer-Aided Engineering (J.6): {\bf Computer-aided
                 design (CAD)}; Computer Systems Organization ---
                 Computer System Implementation --- VLSI Systems
                 (C.5.4); Computer Systems Organization --- Computer
                 System Implementation --- Large and Medium
                 (``Mainframe'') Computers (C.5.1); Software ---
                 Operating Systems --- General (D.4.0): {\bf UNIX};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}",
}

@InProceedings{Hou:1988:SER,
  author =       "Wen-Chi Hou and Gultekin Ozsoyoglu and Baldeo K.
                 Taneja",
  title =        "Statistical estimators for relational algebra
                 expressions",
  crossref =     "ACM:1988:PPS",
  pages =        "276--287",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p276-hou/p276-hou.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p276-hou/",
  abstract =     "Present database systems process all the data related
                 to a query before giving out responses. As a result,
                 the size of the data to be processed becomes excessive
                 for real-time/time-constrained environments. A new
                 methodology is needed to cut down systematically the
                 time to process the data involved in processing the
                 query. To this end, we propose to use data samples and
                 construct an approximate synthetic response to a given
                 query. \par

                 In this paper, we consider only COUNT(E) type queries,
                 where E is an arbitrary relational algebra expression.
                 We make no assumptions about the distribution of
                 attribute values and ordering of tuples in the input
                 relations, and propose consistent and unbiased
                 estimators for arbitrary COUNT(E) type queries. We
                 design a sampling plan based on the cluster sampling
                 method to improve the utilization of sampled data and
                 to reduce the cost of sampling. We also evaluate the
                 performance of the proposed estimators.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Bell:1988:SDM,
  author =       "Jean L. Bell",
  title =        "A specialized data management system for parallel
                 execution of particle physics codes",
  crossref =     "ACM:1988:PAC",
  pages =        "277--285",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p277-bell/p277-bell.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p277-bell/",
  abstract =     "The specialized data management system described in
                 this paper was motivated by the need for much more
                 efficient data management than a standard database
                 management system could provide for particle physics
                 codes in shared memory multiprocessor environments. The
                 special characteristics of data and access patterns in
                 particle physics codes need to be fully exploited in
                 order to effect efficient data management. The data
                 management system allows parametric user control over
                 system features not usually available to them,
                 especially details of physical design and retrieval
                 such as horizontal clustering, asynchronous I/O, and
                 automatic distribution across processors. In the past,
                 each physics code has constructed the equivalent of a
                 primitive data management system from scratch. The
                 system described in this paper is a generic system that
                 can now be interfaced with a variety of physics
                 codes.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Computer
                 Applications --- Physical Sciences and Engineering
                 (J.2): {\bf Physics}; Computing Methodologies ---
                 Simulation and Modeling --- Applications (I.6.3);
                 Computer Systems Organization --- Processor
                 Architectures --- Multiple Data Stream Architectures
                 (Multiprocessors) (C.1.2): {\bf Parallel processors**};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods}",
}

@InProceedings{Christodoulakis:1988:PAF,
  author =       "Stavros Christodoulakis and Daniel Alexander Ford",
  title =        "Performance analysis and fundamental performance
                 tradeoffs for {CLV} optical disks",
  crossref =     "ACM:1988:PAC",
  pages =        "286--294",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p286-christodoulakis/p286-christodoulakis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p286-christodoulakis/",
  abstract =     "CLV type optical disks is a very large and important
                 class of optical disk technology, of which CD-ROM disks
                 form a subclass. \par

                 In this paper we present a model of retrieval from CLV
                 optical disks. We then provide exact and approximate
                 results analyzing the retrieval performance from them.
                 Our analysis takes into account disks with and without
                 a mirror in the read mechanism, small objects
                 completely placed within block boundaries, placement
                 that allows block boundary crossing, as well as very
                 large objects (such as documents) placed within files.
                 \par

                 In the second part of the paper we describe some
                 fundamental implications of physical data base design
                 for data bases stored on CLV optical disks. We show
                 that very significant performance gains may be realized
                 by appropriate design.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  subject =      "Computing Methodologies --- Simulation and Modeling
                 --- Applications (I.6.3); Computing Methodologies ---
                 Simulation and Modeling --- Model Validation and
                 Analysis (I.6.4); Information Systems --- Information
                 Storage and Retrieval --- Information Search and
                 Retrieval (H.3.3): {\bf Retrieval models}; Information
                 Systems --- Database Management --- Physical Design
                 (H.2.2); Hardware --- Memory Structures --- Design
                 Styles (B.3.2): {\bf Mass storage}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Transaction processing}; Information Systems ---
                 Information Storage and Retrieval --- Information
                 Search and Retrieval (H.3.3): {\bf Search process}",
}

@InProceedings{Huang:1988:SSM,
  author =       "Bing-Chao Huang and Michael A. Langston",
  title =        "Stable set and multiset operations in optimal time and
                 space",
  crossref =     "ACM:1988:PPS",
  pages =        "288--293",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p288-huang/p288-huang.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p288-huang/",
  abstract =     "The focus of this paper is on demonstrating the
                 existence of methods for stably performing set and
                 multiset operations on sorted files of data in both
                 optimal time and optimal extra space. It is already
                 known that stable merging and stable duplicate-key
                 extraction permit such methods. The major new results
                 reported herein are these \par

                 an asymptotically optimal time and space algorithm is
                 devised for stably selecting matched records from a
                 sorted file, \par

                 this selection strategy is employed, along with other
                 algorithmic tools, to prove that all of the elementary
                 binary set operations can be stably performed in
                 optimal time and space on sorted files, and \par

                 after generalizing these operations to multisets in a
                 natural way for file processing, it is proved that each
                 can be stably performed in optimal time and space on
                 sorted files \par

                 ",
  acknowledgement = ack-nhfb,
}

@InProceedings{Yu:1988:MTS,
  author =       "Lin Yu and Daniel J. Rosenkrantz",
  title =        "Minimizing time-space cost for database version
                 control",
  crossref =     "ACM:1988:PPS",
  pages =        "294--301",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p294-yu/p294-yu.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p294-yu/",
  abstract =     "We introduce the concept of a version graph to model
                 the problem of minimising the space and version
                 regeneration cost for database version control. We show
                 that, in general, this problem and several of its
                 variations are NP-complete. Motivated by the practical
                 importance of these problems, we develop several
                 heuristics and obtain worst-case guarantees on their
                 performance. We also present linear time algorithms for
                 problems characterized by special classes of version
                 graphs.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Hanson:1988:PQA,
  author =       "Eric N. Hanson",
  title =        "Processing queries against database procedures: a
                 performance analysis",
  crossref =     "ACM:1988:PAC",
  pages =        "295--302",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p295-hanson/p295-hanson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p295-hanson/",
  abstract =     "A database procedure is a collection of queries stored
                 in the database. Several methods are possible for
                 processing queries that retrieve the value returned by
                 a database procedure. The conventional algorithm is to
                 execute the queries in a procedure whenever it is
                 accessed. A second strategy requires caching the
                 previous value returned by the database procedure. If
                 the cached value is valid at the time of a query, the
                 value is returned immediately. If the cached value has
                 been invalidated by an update, the value is recomputed,
                 stored back into the cache, and then returned. A third
                 strategy uses a differential view maintenance algorithm
                 to maintain an up-to-date copy of the value returned by
                 the procedure. This paper compares the performance of
                 these three alternatives. The results show that which
                 algorithm is preferred depends heavily on the database
                 environment, particularly, the frequency of updates and
                 the size of objects retrieved by database procedures.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Economics; Languages; Management;
                 Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Query
                 formulation}; Information Systems --- Database
                 Management --- Languages (H.2.3): {\bf Query
                 languages}; Computing Methodologies --- Simulation and
                 Modeling --- Applications (I.6.3); Computing Milieux
                 --- Management of Computing and Information Systems ---
                 Installation Management (K.6.2): {\bf Pricing and
                 resource allocation}",
}

@InProceedings{Reiter:1988:WSD,
  author =       "Raymond Reiter",
  title =        "What should a database know?",
  crossref =     "ACM:1988:PPS",
  pages =        "302--304",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p302-reiter/p302-reiter.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p302-reiter/",
  abstract =     "The by now conventional perspective on databases,
                 especially deductive databases, is that they are sets
                 of first order sentences. As such, they can be said to
                 be claims about the truths of some {\em external\/}
                 world, the database is a symbolic representation of
                 that world. \par

                 While agreeing with this account of what a database is,
                 I disagree with how, both in theory and practice, a
                 database is {\em used}, specifically how it is queried
                 and how its integrity is enforced. \par

                 Virtually all approaches to database query evaluation
                 treat queries as first order formulas, usually with
                 free variables whose bindings resulting from the
                 evaluation phase define the answers to the query. The
                 sole exception to this is the work of Levesque (1981,
                 1984), who argues that queries should be formulas in an
                 epistemic modal logic. Queries, in other words, should
                 be permitted to address aspects of the external world
                 as represented in the database, as well as aspects of
                 the database itself i.e., aspects of what the database
                 {\em knows}. To take a simple example, suppose {\em DB
                 = p $\vee$ q\/} \par

                 Query $p$ (i.e., is $p$ true in the external world?)
                 \par

                 Answer unknown \par

                 Query {\em Kp\/} (i.e., do you know whether $p$ is true
                 in the external world?) \par

                 Answer no \par

                 Levesque's modal logic (called KFOPCE) distinguishes
                 between known and unknown individuals in the database
                 and thus accounts for ``regular'' database values as
                 well as null values. For example, if {\em KB\/} is
                 \par

                 {Teach (John, Math100), ($x$) Teach ({\em x}, CS100),
                 Teach (Mary, Psych100) $\vee$ Teach (Sue, Psych100)},
                 \par

                 then \par

                 Query ($x$) {$K$} Teach (John, $x$) i.e., is there a
                 known course which John teaches? \par

                 Answer yes-Math100 \par

                 Query ($x$) {$K$} Teach ({\em x}, CS100) i.e., is there a
                 known teacher for CS100? \par

                 Answer No \par

                 Query ($x$) Teach ({\em x}, Psych100) i.e., does anyone
                 teach Psych 100? \par

                 Answer: Yes - Mary or Sue \par

                 Query ($x$) {$K$} Teach ({\em x}, Psych100) i.e., is
                 there a known teacher of Psych100? \par

                 Answer No \par

                 Levesque (1981, 1984) provides a semantics for his
                 language KFOPCE. FOPCE is the first order language
                 KFOPCE without the modal K. Levesque proposes that a
                 database is best viewed as a set of FOPCE sentences,
                 and that it be queried by sentences of KFOPCE. He
                 further provides a (noneffective) way of answering
                 database queries. \par

                 Recently I have considered the concept of a static
                 integrity constraint in the context of Levesque's
                 KFOPCE (Reiter 1988). The conventional view of
                 integrity constraints is that, like the database
                 itself, they too are first order formulas (e.g., Lloyd
                 Topor (1985), Nicolas Yazdanian (1978), Reiter (1984)).
                 There are two definitions in the literature of a
                 deductive database {\em KB\/} satisfying an integrity
                 constraint {\em IC}. \par

                 {\em Definition 1\/} Consistency (e.g., Kowalski
                 (1978), Sadri and Kowalski (1987)) {\em KB satisfies IC
                 iff KB + IC is satisfiable\/} \par

                 {\em Definition 2\/} Entailment (e.g., Lloyd and Topor
                 (1985), Reiter (1984)) {\em KB satisfies IC iff KB
                 $\models$ IC\/} \par

                 Alas, neither definition seems correct. Consider a
                 constraint requiring that employees have social
                 security numbers ($\forall x$) {\em emp\/}($x$)
                 $\rightarrow$ ($\exists y$) {\em ss\#\/}({\em x, y\/}) (1) \par

                 1 Suppose {\em KB\/} = {emp (Mary)} Then {\em KB +
                 IC\/} is satisfiable. But intuitively, we want the
                 constraint to require {\em KB\/} to contain a ss\#
                 ent",
  acknowledgement = ack-nhfb,
}

@InProceedings{Jarke:1988:MKA,
  author =       "Matthias Jarke and Thomas Rose",
  title =        "Managing knowledge about information system
                 evolution",
  crossref =     "ACM:1988:PAC",
  pages =        "303--311",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p303-jarke/p303-jarke.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p303-jarke/",
  abstract =     "This paper describes the design and initial prototype
                 implementation of a knowledge base management system
                 (KBMS) for controlling database software development
                 and maintenance. The KBMS employs a version of the
                 conceptual modelling language CML to represent
                 knowledge about the tool-aided development process of
                 an information system from requirements analysis to
                 conceptual design to implementation, together with the
                 relationship of these system components to the
                 real-world environment in which the information system
                 is intended to function. A decision-centered
                 documentation methodology facilitates communication
                 across time and among multiple developers (and possibly
                 users), thus enabling improved maintenance support.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Documentation; Management",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Knowledge Representation Formalisms and Methods
                 (I.2.4): {\bf Representations (procedural and
                 rule-based)}; Computing Milieux --- Management of
                 Computing and Information Systems --- Software
                 Management (K.6.3): {\bf Software maintenance};
                 Software --- Software Engineering --- Design**
                 (D.2.10): {\bf Representation**}; Computing Milieux ---
                 Management of Computing and Information Systems ---
                 Software Management (K.6.3): {\bf Software
                 development}; Computing Milieux --- Management of
                 Computing and Information Systems --- Project and
                 People Management (K.6.1): {\bf Systems development}",
}

@InProceedings{Buneman:1988:SCO,
  author =       "Peter Buneman and Susan Davidson and Aaron Watters",
  title =        "A semantics for complex objects and approximate
                 queries",
  crossref =     "ACM:1988:PPS",
  pages =        "305--314",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p305-buneman/p305-buneman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p305-buneman/",
  abstract =     "A new definition of complex objects is introduced
                 which provides a denotation for incomplete tuples as
                 well as partially described sets. Set values are
                 ``sandwiched'' between ``complete'' and ``consistent''
                 descriptions (representing the Smyth and Hoare
                 powerdomains respectively), allowing the maximal values
                 to be arbitrary subsets of maximal elements in the
                 domain of the set. We also examine the use of rules in
                 defining queries over such objects.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Naughton:1988:CSR,
  author =       "Jeffrey F. Naughton",
  title =        "Compiling separable recursions",
  crossref =     "ACM:1988:PAC",
  pages =        "312--319",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p312-naughton/p312-naughton.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p312-naughton/",
  abstract =     "In this paper we consider evaluating queries on
                 relations defined by a combination of recursive rules.
                 We first define separable recursions. We then give a
                 specialized algorithm for evaluating selections on
                 separable recursions. Like the Generalized Magic Sets
                 and Generalized Counting algorithms, this algorithm
                 uses selection constants to avoid examining irrelevant
                 portions of the database, however, on some simple
                 recursions this algorithm is $O(n)$, whereas
                 Generalized Magic Sets is $O(n^2)$ and Generalized
                 Counting is $O(2^n)$",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3): {\bf Logic programming};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3):
                 {\bf Query formulation}",
}

@InProceedings{Winslett:1988:FCU,
  author =       "Marianne Winslett",
  title =        "A framework for comparison of update semantics",
  crossref =     "ACM:1988:PPS",
  pages =        "315--324",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p315-winslett/p315-winslett.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p315-winslett/",
  abstract =     "Scattered across the scientific literature of three or
                 more disciplines appears a profusion of proposals for
                 semantics of updates to logical theories. Because no
                 previous work has compared these proposals with one
                 another, the merits and demerits of the various
                 approaches are not well known. Since the semantics
                 differ from one another in systematic ways, it is
                 possible to generalize from existing proposals and
                 speak of the properties of {\em classes\/} of update
                 semantics. In this paper we suggest a two-dimensional
                 taxonomy for characterizing semantics, and describe the
                 properties inherent to the classes implicit therein.
                 Our discussion includes measurement of the
                 computational complexity of the different classes.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Youn:1988:CRF,
  author =       "Cheong Youn and Lawrence J. Henschen and Jiawei Han",
  title =        "Classification of recursive formulas in deductive
                 databases",
  crossref =     "ACM:1988:PAC",
  pages =        "320--328",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p320-youn/p320-youn.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p320-youn/",
  abstract =     "In this paper, we present results on the
                 classification of linear recursive formulas in
                 deductive databases and apply those results to the
                 compilation and optimization of recursive queries. We
                 also introduce compiled formulas and query evaluation
                 plans for a representative query for each of these
                 classes. \par

                 To explain general recursive formulas, we use a graph
                 model that shows the connectivity between variables.
                 The connectivity between variables is the most critical
                 part in processing recursive formulas. We demonstrate
                 that based on such a graph model all the linear
                 recursive formulas can be classified into several
                 classes and each class shares some common
                 characteristics in compilation and query processing.
                 The compiled formulas and the corresponding query
                 evaluation plans can be derived based on the study of
                 the compilation of each class.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Query
                 formulation}; Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Mathematical Logic
                 (F.4.1): {\bf Recursive function theory}",
}

@InProceedings{Sippu:1988:GTC,
  author =       "Seppo Sippu and Eljas Soisalon-Soininen",
  title =        "A generalized transitive closure for relational
                 queries",
  crossref =     "ACM:1988:PPS",
  pages =        "325--332",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p325-sippu/p325-sippu.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p325-sippu/",
  abstract =     "We augment relational algebra with a generalized
                 transitive closure operator that allows for the
                 efficient evaluation of a subclass of recursive
                 queries. The operator is based on a composition
                 operator which is as general as possible when the
                 operator is required to be associative and when only
                 relational algebra operators are used in its
                 definition. The closure of such a composition can be
                 computed using the well-known efficient algorithms
                 designed for the computation of the usual transitive
                 closure. Besides the case in which complete
                 materialization of recursive relations are required,
                 our strategy also yields an efficient solution in the
                 case in which a selection is applied to the closure.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Wolfson:1988:DPL,
  author =       "Ouri Wolfson and Avi Silberschatz",
  title =        "Distributed processing of logic programs",
  crossref =     "ACM:1988:PAC",
  pages =        "329--336",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p329-wolfson/p329-wolfson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p329-wolfson/",
  abstract =     "This paper is concerned with the issue of parallel
                 evaluation of logic programs. To address this issue we
                 define a new concept of {\em predicate
                 decomposability}. If a predicate is decomposable, it
                 means that the load of evaluating it can be divided
                 among a number of processors, without a need for
                 communication among them. This in turn results in a
                 very significant speed-up of the evaluation process.
                 \par

                 We completely characterize three classes of single rule
                 programs (sirups) with respect to decomposability:
                 nonrecursive, linear, and simple chain programs. All
                 three classes were studied previously in various
                 contexts. We establish that nonrecursive programs are
                 decomposable, whereas for the other two classes we
                 determine which ones are, and which ones are not
                 decomposable. We also establish two sufficient
                 conditions for sirup decomposability.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Distributed databases}; Theory of Computation ---
                 Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Logic and constraint
                 programming}; Computing Methodologies --- Artificial
                 Intelligence --- Deduction and Theorem Proving (I.2.3):
                 {\bf Logic programming}",
}

@InProceedings{Haddad:1988:CMC,
  author =       "Ramsey W. Haddad and Jeffrey F. Naughton",
  title =        "Counting methods for cyclic relations",
  crossref =     "ACM:1988:PPS",
  pages =        "333--340",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p333-haddad/p333-haddad.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p333-haddad/",
  abstract =     "In this paper we consider selections of the form
                 ``column = constant'' on relations defined by linear
                 recursive, two rule datalog programs. In general,
                 counting methods perform well on such queries. However,
                 counting methods fail in the presence of cycles in the
                 database. We present an algorithm in the spirit of
                 counting methods that correctly deals with cyclic data
                 and has the same asymptotic running time as counting
                 methods. The algorithm, which is based on reducing a
                 query on a database to a question about intersections
                 of semi-linear sets, works by using efficient methods
                 to construct the appropriate semi-linear sets from the
                 database and query constant.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Group:1988:BNS,
  author =       "{Tandem Performance Group}",
  title =        "A benchmark of non-stop {SQL} on the debit credit
                 transaction",
  crossref =     "ACM:1988:PAC",
  pages =        "337--341",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/citations/proceedings/mod/50202/p337-tandem_performance_group/",
  abstract =     "NonStop SQL is an implementation of ANSI SQL on Tandem
                 Computer Systems. Debit Credit is a widely used
                 industry-standard transaction. This paper summarizes a
                 benchmark of NonStop SQL which demonstrated linear
                 growth of throughput from 14 to 208 Debit Credit
                 transactions per second as the hardware grew from 2 to
                 32 processors. The benchmark also compared the
                 performance of NonStop SQL to the performance of a
                 record-at-a-time file system interface.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Management; Performance",
  subject =      "Computer Systems Organization --- Performance of
                 Systems (C.4): {\bf Performance attributes};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf SQL}; Computing Milieux ---
                 Management of Computing and Information Systems ---
                 Installation Management (K.6.2): {\bf Benchmarks};
                 Computer Systems Organization --- Performance of
                 Systems (C.4): {\bf Measurement techniques}",
}

@InProceedings{Vardi:1988:DUR,
  author =       "Moshe Y. Vardi",
  title =        "Decidability and undecidability results for
                 boundedness of linear recursive queries",
  crossref =     "ACM:1988:PPS",
  pages =        "341--351",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/308386/p341-vardi/p341-vardi.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/308386/p341-vardi/",
  abstract =     "If it is possible to eliminate recursion from a
                 Datalog program {\em P}, then {$P$} is said to be {\em
                 bounded}. It was shown by Gaifman et al that the
                 problem of deciding whether a given Datalog program is
                 bounded is undecidable, even for linear programs that
                 has one {\em 4-ary\/} intensional predicate. We sharpen
                 that result by showing that the problem of deciding
                 whether a given Datalog program is bounded is
                 undecidable, even for linear programs that has one {\em
                 binary\/} intensional predicate. We then consider
                 linear programs with a single recursive rule. We show
                 that if the intensional predicate is binary, then the
                 boundedness problem for such program is decidable, in
                 fact, it is NP-complete.",
  acknowledgement = ack-nhfb,
}

@InProceedings{Borr:1988:HPS,
  author =       "A. Borr",
  title =        "High performance {SQL} through low-level system
                 integration",
  crossref =     "ACM:1988:PAC",
  pages =        "342--349",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p342-borr/p342-borr.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p342-borr/",
  abstract =     "NonStop SQL [TM] achieves high performance through an
                 implementation which integrates SQL record access with
                 the pre-existing disk I/O and transaction management
                 subsystems, and moves SQL function downward from the
                 client to the server level of these subsystems. System
                 integration and movement of function to the server
                 reduce message traffic and CPU consumption by putting
                 SQL optimizations at the lower levels of the system.
                 Examples of such optimizations are message traffic
                 savings by filtering data and applying updates at the
                 data source, I/O savings by SQL-optimized buffer pool
                 management, and locking and transaction journaling
                 techniques which take advantage of SQL semantics.
                 Achieving message traffic reduction is particularly
                 important in a distributed, non shared-memory
                 architecture such as the Tandem NonStop System. The
                 result of this implementation is an SQL system which
                 matches the performance of the pre-existing DBMS, while
                 inheriting such pre-existing architecturally-derived
                 features as high availability, transaction-based data
                 integrity, and distribution of both data and
                 execution.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing}; Computer
                 Systems Organization --- Performance of Systems (C.4):
                 {\bf Performance attributes}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf SQL}",
}

@InProceedings{DeWitt:1988:PAG,
  author =       "D. J. DeWitt and S. Ghandeharizadeh and D. Schneider",
  title =        "A performance analysis of the {Gamma} database machine",
  crossref =     "ACM:1988:PAC",
  pages =        "350--360",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p350-dewitt/p350-dewitt.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p350-dewitt/",
  abstract =     "This paper presents the results of an initial
                 performance evaluation of the Gamma database machine.
                 In our experiments we measured the effect of relation
                 size and indices on response time for selection, join,
                 and aggregation queries, and single-tuple updates. A
                 Teradata DBC/1012 database machine of similar size is
                 used as a basis for interpreting the results obtained.
                 We also analyze the performance of Gamma relative to
                 the number of processors employed and study the impact
                 of varying the memory size and disk page size on the
                 execution time of a variety of selection and join
                 queries. We analyze and interpret the results of these
                 experiments based on our understanding of the system
                 hardware and software, and conclude with an assessment
                 of the strengths and weaknesses of Gamma.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Management; Performance",
  subject =      "Computer Systems Organization --- Performance of
                 Systems (C.4): {\bf Measurement techniques}; Computer
                 Systems Organization --- Performance of Systems (C.4):
                 {\bf Performance attributes}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}; Computing Milieux --- Management of
                 Computing and Information Systems --- Installation
                 Management (K.6.2): {\bf Benchmarks}",
  xxauthor =     "D. J. DeWitt and S. Ghanderaizadeh and D. Schneider",
}

@InProceedings{Roesler:1988:SLM,
  author =       "M. Roesler and W. A. Burkhard",
  title =        "Semantic lock models in object-oriented distributed
                 systems and deadlock resolution",
  crossref =     "ACM:1988:PAC",
  pages =        "361--370",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p361-roesler/p361-roesler.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p361-roesler/",
  abstract =     "{\em We propose a distributed algorithm for detection
                 and resolution of resource deadlocks in object-oriented
                 distributed systems. The algorithm proposed is shown to
                 detect and resolve all O(n 1) cycles present in the
                 worst case waits-for-graph (WFG) with n vertices by
                 transmitting O(n 3) messages of small constant size.
                 Its average time complexity has been shown to be O(ne),
                 where e is the number of edges in the WFG. After
                 deadlock resolution, the algorithm leaves information
                 in the system concerning dependence relations of
                 running transactions. This information will preclude
                 the wasteful retransmission of messages and reduce the
                 delay in detecting future deadlocks}.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Deadlock avoidance};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}; Theory of
                 Computation --- Computation by Abstract Devices ---
                 Models of Computation (F.1.1): {\bf Computability
                 theory}",
}

@InProceedings{Ramarao:1988:CPD,
  author =       "K. V. S. Ramarao",
  title =        "Commitment in a partitioned distributed database",
  crossref =     "ACM:1988:PAC",
  pages =        "371--378",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p371-ramarao/p371-ramarao.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p371-ramarao/",
  abstract =     "Network partition is among the hardest failure types
                 in a distributed system even if all processors and
                 links are of {\em fail-stop\/} type. We address the
                 transaction commitment problem in a partitioned
                 distributed database. It is assumed that partitions are
                 detectable. The approach taken is conservative---that
                 is, the same transaction cannot be committed by one
                 site and aborted by another. \par

                 A new and very general formal model of protocols
                 operating in a partitioned system is introduced and
                 protocols more efficient than the existing ones are
                 constructed.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}",
}

@InProceedings{Korth:1988:FMC,
  author =       "H. F. Korth and G. Speegle",
  title =        "Formal model of correctness without serializability",
  crossref =     "ACM:1988:PAC",
  pages =        "379--386",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p379-korth/p379-korth.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p379-korth/",
  abstract =     "In the classical approach to transaction processing, a
                 concurrent execution is considered to be correct if it
                 is equivalent to a non-concurrent schedule. This notion
                 of correctness is called {\em serializability}.
                 Serializability has proven to be a highly useful
                 concept for transaction systems for data-processing
                 style applications. Recent interest in applying
                 database concepts to applications in computer-aided
                 design, office information systems, etc. has resulted
                 in transactions of relatively long duration. For such
                 transactions, there are serious consequences to
                 requiring serializability as the notion of correctness.
                 Specifically, such systems either impose long-duration
                 waits or require the abortion of long transactions. In
                 this paper, we define a transaction model that allows
                 for several alternative notions of correctness without
                 the requirement of serializability. After introducing
                 the model, we investigate classes of schedules for
                 transactions. We show that these classes are richer
                 than analogous classes under the classical model.
                 Finally, we show the potential practicality of our
                 model by describing protocols that permit a transaction
                 manager to allow correct non-serializable executions.",
  acknowledgement = ack-nhfb,
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}",
}

@InProceedings{Ramnarayan:1988:DKB,
  title =        "A data\slash knowledge base management testbed and
                 experimental results on data\slash knowledge base
                 query and update processing",
  author =       "R. Ramnarayan and H. Lu",
  pages =        "387--395",
  year =         "1988",
  crossref =     "ACM:1988:PAC",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p387-ramnarayan/p387-ramnarayan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p387-ramnarayan/",
  abstract =     "This paper presents our experience in designing and
                 implementing a data/knowledge base management
                 testbed. The testbed consists of two layers, the
                 knowledge manager and the database management system,
                 with the former at the top. The testbed is based on
                 the logic programming paradigm, wherein data,
                 knowledge, and queries are all expressed as Horn
                 clauses. The knowledge manager compiles pure,
                 function-free Horn clause queries into embedded-SQL
                 programs, which are executed by the database
                 management system to produce the query results. The
                 database management system is a commercial relational
                 database system and provides storage for both rules
                 and facts. First, the testbed architecture and major
                 data structures and algorithms are described. Then,
                 several preliminary tests conducted using the current
                 version of the testbed and the conclusions from the
                 test results are presented. The principal
                 contributions of this work have been to unify various
                 concepts, both previously published and new ones we
                 developed, into a real system and to present several
                 insights into data/knowledge base management system
                 design gleaned from the test results and our design
                 and implementation experience.",
  generalterms = "Algorithms; Design",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Deduction and Theorem Proving (I.2.3): {\bf Logic
                 programming}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query
                 processing}; Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Mathematical Logic
                 (F.4.1): {\bf Logic and constraint programming}",
  acknowledgement = ack-nhfb,
}

@InProceedings{Delcambre:1988:SCI,
  author =       "L. M. L. Delcambre and J. N. Etheredge",
  title =        "A self-controlling interpreter for the relational
                 production language",
  crossref =     "ACM:1988:PAC",
  pages =        "396--403",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p396-delcambre/p396-delcambre.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p396-delcambre/",
  abstract =     "The Relational Production Language (RPL) solves the
                 paradigm mismatch between expert systems and database
                 systems by relying on the relational data model as the
                 underlying formalism for an expert system. The result
                 is a formally-defined production system language with
                 immediate access to conventional databases. Working
                 memory is modeled as a relational database and rules
                 consist of a relational query on the left hand side
                 (LHS) and database updates on the right hand side
                 (RHS). This paper reports on the design of the RPL 1.0
                 prototype. The prototype directly executes RPL programs
                 and capitalizes on the inherent advantages of the
                 relational approach, particularly for intra-rule and
                 inter-rule parallelism. By using a self-describing
                 approach for representing the interpreter data
                 structures, the interpreter is a self-controlling
                 system that allows conflict resolution, error handling
                 and a wide spectrum of software metrics to be
                 explicitly specified using RPL meta-rules.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Reliability",
  subject =      "Software --- Programming Languages --- Processors
                 (D.3.4): {\bf Interpreters}; Software --- Software
                 Engineering --- Testing and Debugging (D.2.5): {\bf
                 Error handling and recovery}; Software --- Software
                 Engineering --- Requirements/Specifications (D.2.1):
                 {\bf RPL}; Information Systems --- Database Management
                 --- Logical Design (H.2.1): {\bf Data models}",
}

@InProceedings{Sellis:1988:ILP,
  author =       "T. Sellis and C. C. Lin and L. Raschid",
  title =        "Implementing large production systems in a {DBMS}
                 environment: concepts and algorithms",
  crossref =     "ACM:1988:PAC",
  pages =        "404--412",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p404-sellis/p404-sellis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p404-sellis/",
  abstract =     "It has been widely recognized that many future
                 database applications, including engineering processes,
                 manufacturing and communications, will require some
                 kind of rule based reasoning. In this paper we study
                 methods for storing and manipulating large rule bases
                 using relational database management systems. First, we
                 provide a matching algorithm which can be used to
                 efficiently identify applicable rules. The second
                 contribution of this paper is our proposal for
                 concurrent execution strategies which surpass, in terms
                 of performance, the sequential OPS5 execution
                 algorithm. The proposed method is fully parallelizable,
                 which makes its use even more attractive, as it can be
                 used in parallel computing environments.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Computing Methodologies --- Artificial Intelligence ---
                 Applications and Expert Systems (I.2.1); Information
                 Systems --- Information Storage and Retrieval ---
                 Content Analysis and Indexing (H.3.1): {\bf Indexing
                 methods}",
}

@InProceedings{Carey:1988:DMQ,
  author =       "Michael J. Carey and David J. DeWitt",
  title =        "A data model and query language for {EXODUS}",
  crossref =     "ACM:1988:PAC",
  pages =        "413--423",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p413-carey/p413-carey.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p413-carey/",
  abstract =     "{\em In this paper, we present the design of the EXTRA
                 data model and the EXCESS query language for the EXODUS
                 extensible database system. The EXTRA data model
                 includes support for complex objects with shared
                 subobjects, a novel mix of object- and value-oriented
                 semantics for data, support for persistent objects of
                 any type in the EXTRA type lattice, and user-defined
                 abstract data types (ADTs). The EXCESS query language
                 provides facilities for querying and updating complex
                 object structures, and it can be extended through the
                 addition of ADT functions and operators, procedures and
                 functions for manipulating EXTRA schema types, and
                 generic set functions. EXTRA and EXCESS are intended to
                 serve as a test vehicle for tools developed under the
                 EXODUS extensible database system project}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Software ---
                 Programming Languages --- Language Classifications
                 (D.3.2): {\bf EXODUS}; Information Systems --- Database
                 Management --- Languages (H.2.3): {\bf Query
                 languages}; Information Systems --- Information Storage
                 and Retrieval --- Information Search and Retrieval
                 (H.3.3): {\bf Query formulation}",
}

@InProceedings{Lecluse:1988:OOD,
  author =       "C. Lecluse and P. Richard and F. Velez",
  title =        "{$O_2$}, an object-oriented data model",
  crossref =     "ACM:1988:PAC",
  pages =        "424--433",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p424-lecluse/p424-lecluse.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p424-lecluse/",
  abstract =     "The {\em Altair\/} group is currently designing an
                 object-oriented data base system called {$O_2$}. This
                 paper presents a formal description of the
                 object-oriented data model of this system. It proposes
                 a type system defined in the framework of a
                 set-and-tuple data model. It models the well known
                 inheritance mechanism and enforces strong typing.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Transaction processing}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Schema and subschema}",
}

@InProceedings{Borgida:1988:MCH,
  author =       "A. Borgida",
  title =        "Modeling class hierarchies with contradictions",
  crossref =     "ACM:1988:PAC",
  pages =        "434--443",
  year =         "1988",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/50202/p434-borgida/p434-borgida.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/50202/p434-borgida/",
  abstract =     "One characteristic feature of object-oriented systems
                 and knowledge bases (semantic data models, conceptual
                 modeling languages, AI frames) is that they offer as a
                 basic paradigm the notion of objects grouped into
                 classes, which are themselves organized in subclass
                 hierarchies. Through ideas such as inheritance and
                 bounded polymorphism, this feature supports the
                 technique of ``{\em abstraction by generalization\/}'',
                 which has been argued to be of importance in designing
                 Information Systems [11, 2]. \par

                 We provide in this paper examples demonstrating that in
                 some applications {\em over-generalization\/} is likely
                 to occur: an occasional natural subclass may contradict
                 in some way one of its superclass definitions, and thus
                 turn out not to be a strict subtype of this superclass.
                 A similar problem arises when an object is allowed to
                 be a member of several classes which make incompatible
                 predictions about its type. We argue that none of the
                 previous approaches suggested to deal with such
                 situations is entirely satisfactory. \par

                 A language feature is therefore presented to permit
                 class definitions which contradict aspects of other
                 classes, such as superclasses, in an object-based
                 language. In essence, the approach requires
                 contradictions among class definitions to be {\em
                 explicitly\/} acknowledged. We define a semantics of
                 the resulting language, which restores the condition
                 that subclasses are both subsets and subtypes, and
                 deals correctly with the case when an object can belong
                 to several classes. This is done by separating the
                 notions of ``class'' and ``type'', and it allows query
                 compilers to detect type errors as well as eliminate
                 some run-time checks in queries, even in the presence
                 of ``contradictory'' class definitions.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Computing
                 Methodologies --- Artificial Intelligence ---
                 Applications and Expert Systems (I.2.1); Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Data description languages (DDL)}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Data manipulation languages (DML)}",
}

@InProceedings{VanGelder:1989:AFL,
  author =       "A. {Van Gelder}",
  title =        "The alternating fixpoint of logic programs with
                 negation",
  crossref =     "ACM:1989:PPE",
  pages =        "1--10",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p1-van_gelder/p1-van_gelder.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p1-van_gelder/",
  abstract =     "We introduce and describe the {\em alternating
                 fixpoint\/} of a logic program with negation. The
                 underlying idea is to monotonically build up a set of
                 {\em negative\/} conclusions until the least fixpoint
                 is reached, using a transformation related to the one
                 that defines stable models, developed by Gelfond and
                 Lifschitz. From a fixed set of negative conclusions, we
                 can derive the positive conclusions that follow
                 (without deriving any further negative ones), by
                 traditional Horn clause semantics. The union of
                 positive and negative conclusions is called the {\em
                 alternating fixpoint partial model}. The name
                 ``alternating'' was chosen because the transformation
                 runs in two passes; the first pass transforms an
                 underestimate of the set of negative conclusions into
                 an (intermediate) overestimate; the second pass
                 transforms the overestimates into a new underestimate;
                 the composition of the two passes is monotonic.
                 \par

                 Our main theorem is that the alternating fixpoint
                 partial model is exactly the well-founded partial
                 model. \par

                 We also show that a system of {\em fixpoint logic},
                 which permits rule bodies to be first order formulas
                 but requires inductive relations to be positive within
                 them, can be transformed straightforwardly into a
                 normal logic program whose alternating fixpoint partial
                 model corresponds to the least fixpoint of the fixpoint
                 logic system. Thus alternating fixpoint logic is at
                 least as expressive as fixpoint logic. The converse is
                 shown to hold for finite structures.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Theory",
  keywords =     "design; theory",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1): {\bf
                 Lambda calculus and related systems}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2)",
}

@InProceedings{Salza:1989:ESQ,
  author =       "Silvio Salza and Mario Terranova",
  title =        "Evaluating the size of queries on relational databases
                 with non-uniform distribution and stochastic
                 dependence",
  crossref =     "Clifford:1989:PAS",
  pages =        "8--14",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p8-salza/p8-salza.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p8-salza/",
  abstract =     "{\em The paper deals with the problem of evaluating
                 how the originality of the attributes of a relation,
                 i.e., the number of distinct values in each attribute,
                 is affected by relational operations that reduce the
                 cardinality of the relation. This is indeed an
                 interesting problem in research areas such as database
                 design and query optimization. Some authors have shown
                 that non uniform distributions and stochastic
                 dependence significantly affect the originality of the
                 attributes. Therefore the models that have been
                 proposed in the literature, based on uniformity and
                 independence assumptions, in several situations can not
                 be conveniently utilized. In this paper we propose a
                 probabilistic model that overcomes the need of the
                 uniformity and independence assumptions. The model is
                 exact for non uniform distributions when the attributes
                 are independent, and gives approximate results when
                 stochastic dependence is considered. In the latter case
                 the analytical results have been compared with a
                 simulation, and proved to be quite accurate}.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Theory of Computation --- Computation by Abstract
                 Devices --- Modes of Computation (F.1.2): {\bf
                 Probabilistic computation}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}; Computing Methodologies --- Simulation and
                 Modeling --- Applications (I.6.3)",
}

@InProceedings{Przymusinski:1989:ELP,
  title =        "Every logic program has a natural stratification and
                 an iterated least fixed point model",
  author =       "T. C. Przymusinski",
  pages =        "11--21",
  year =         "1989",
  crossref =     "ACM:1989:PPE",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p11-przymusinski/p11-przymusinski.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p11-przymusinski/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p11-przymusinski/",
  keywords =     "algorithms; design",
  subject =      "{\bf F.4.1} Theory of Computation, MATHEMATICAL
                 LOGIC AND FORMAL LANGUAGES, Mathematical Logic,
                 Lambda calculus and related systems. {\bf F.3.2}
                 Theory of Computation, LOGICS AND MEANINGS OF
                 PROGRAMS, Semantics of Programming Languages. {\bf
                 I.2.3} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Deduction and Theorem Proving,
                 Nonmonotonic reasoning and belief revision.",
  acknowledgement = ack-nhfb,
}

@inproceedings{Kolodner:1989:AGC,
  author          = {Elliot Kolodner and Barbara Liskov and William Weihl},
  title           = {Atomic garbage collection: managing a stable heap},
  crossref        = {Clifford:1989:PAS},
  pages           = {15--25},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/mod/67544/p15-kolodner/p15-kolodner.pdf;
                     http://www.acm.org/pubs/citations/proceedings/mod/67544/p15-kolodner/},
  abstract        = {Modern database systems use transactions to achieve a
                     high degree of fault-tolerance. Many modern programming
                     languages and systems provide garbage collected heap
                     storage, which frees the programmer from the job of
                     explicitly deallocating storage. In this paper we
                     describe integrated garbage collection and recovery
                     algorithms for managing a {\em stable heap\/} in which
                     accessible objects survive both system crashes and
                     media failures. \par

                     A garbage collector typically both moves and modifies
                     objects which can lead to problems when the heap is
                     stable because a system crash after the start of
                     collection but before enough of the reorganized heap
                     reaches the disk can leave the disk in an inconsistent
                     state. Furthermore, collection has to be coordinated
                     with the recovery system. We present a collection
                     algorithm and recovery system that solves these
                     problems.},
  acknowledgement = ack-nhfb,
  generalterms    = {Algorithms; Design; Languages},
  subject         = {Information Systems --- Database Management ---
                     Systems (H.2.4): {\bf Query processing}; Information
                     Systems --- Database Management --- Systems (H.2.4);
                     Information Systems --- Database Management ---
                     Languages (H.2.3): {\bf Query languages}; Information
                     Systems --- Database Management --- Logical Design
                     (H.2.1): {\bf Data models}; Software --- Programming
                     Languages --- Language Classifications (D.3.2): {\bf
                     C++}},
}

@InProceedings{Ross:1989:PSW,
  author =       "K. A. Ross",
  title =        "A procedural semantics for well founded negation in
                 logic programs",
  crossref =     "ACM:1989:PPE",
  pages =        "22--33",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p22-ross/p22-ross.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p22-ross/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p22-ross/",
  abstract =     "We introduce global SLS-resolution, a procedural
                 semantics for well-founded negation as defined by Van
                 Gelder, Ross and Schlipf. Global SLS-resolution extends
                 Przymusinski's SLS-resolution, and may be applied to
                 all programs, whether locally stratified or not.
                 Global SLS-resolution is defined in terms of global
                 trees, a new data structure representing the dependence
                 of goals on derived negative subgoals. We prove that
                 global SLS-resolution is sound with respect to the
                 well-founded semantics, and complete for
                 non-floundering queries.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Theory",
  keywords =     "design; theory",
  subject =      "{\bf F.3.2} Theory of Computation, LOGICS AND MEANINGS
                 OF PROGRAMS, Semantics of Programming Languages. {\bf
                 F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND
                 FORMAL LANGUAGES, Mathematical Logic, Lambda calculus
                 and related systems. {\bf E.1} Data, DATA STRUCTURES,
                 Trees.",
}

@InProceedings{Dong:1989:DPD,
  author =       "Guozhu Dong",
  title =        "On distributed processibility of {Datalog} queries by
                 decomposing databases",
  crossref =     "Clifford:1989:PAS",
  pages =        "26--35",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p26-dong/p26-dong.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p26-dong/",
  abstract =     "We consider distributed or parallel processing of
                 datalog queries. We address this issue by decomposing
                 databases into a number of subdatabases such that the
                 computation of a program on a database can be achieved
                 by {\em unioning its independent evaluations\/} on the
                 subdatabases. More specifically, we identify two kinds
                 of distributed-processable programs according to the
                 properties of database decomposition. (i) A program is
                 {\em disjoint distributive\/} if it is distributed
                 processable over a decomposition consisting of
                 subdatabases with disjoint domains. A characterization
                 of such programs is given in terms of an easily
                 decidable syntactic property called {\em connectivity}.
                 (ii) A program is {\em bounded distributive\/} if it is
                 distributed processable over a decomposition consisting
                 of subdatabases with a fixed size. Three interesting
                 characterizations of such a program are presented, the
                 first by bounded recursion, the second by equivalence
                 to a 1-bounded-recursive program, and the third by
                 constant parallel complexity.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Mathematics of Computing --- Numerical Analysis ---
                 General (G.1.0): {\bf Parallel algorithms}; Software
                 --- Programming Techniques --- Concurrent Programming
                 (D.1.3); Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Concurrency}",
}

@inproceedings{Bry:1989:LPC,
  author          = {F. Bry},
  title           = {Logic programming as constructivism: a formalization
                     and its application to databases},
  crossref        = {ACM:1989:PPE},
  pages           = {34--50},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/pods/73721/p34-bry/p34-bry.pdf;
                     http://www.acm.org/pubs/citations/proceedings/pods/73721/p34-bry/;
                     http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p34-bry/},
  abstract        = {{\em The features of logic programming that seem
                     unconventional from the viewpoint of classical logic
                     can be explained in terms of constructivistic logic. We
                     motivate and propose a constructivistic proof theory of
                     non-Horn logic programming. Then, we apply this
                     formalization for establishing results of practical
                     interest. First, we show that `stratification' can be
                     motivated in a simple and intuitive way. Relying on
                     similar motivations, we introduce the larger classes of
                     `loosely stratified' and `constructively consistent'
                     programs. Second, we give a formal basis for
                     introducing quantifiers into queries and logic programs
                     by defining `constructively domain independent'
                     formulas. Third, we extend the Generalized Magic Sets
                     procedure to loosely stratified and constructively
                     consistent programs, by relying on a `conditional
                     fixpoint' procedure}.},
  acknowledgement = ack-nhfb,
  generalterms    = {Design},
  keywords        = {design},
  subject         = {{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC
                     AND FORMAL LANGUAGES, Mathematical Logic, Lambda
                     calculus and related systems. {\bf F.3.1} Theory of
                     Computation, LOGICS AND MEANINGS OF PROGRAMS,
                     Specifying and Verifying and Reasoning about Programs,
                     Specification techniques.},
}

@inproceedings{Agrawal:1989:OOD,
  author          = {R. Agrawal and N. H. Gehani},
  title           = {{ODE (Object Database and Environment)}: the language
                     and the data model},
  crossref        = {Clifford:1989:PAS},
  pages           = {36--45},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/mod/67544/p36-agrawal/p36-agrawal.pdf;
                     http://www.acm.org/pubs/citations/proceedings/mod/67544/p36-agrawal/},
  abstract        = {ODE is a database system and environment based on the
                     object paradigm. It offers one integrated data model
                     for both database and general purpose manipulation. The
                     database is defined, queried and manipulated in the
                     database programming language O++ which is based on
                     C++. O++ borrows and extends the object definition
                     facility of C++, called the class. Classes support data
                     encapsulation and multiple inheritance. We provide
                     facilities for creating persistent and versioned
                     objects, defining sets, and iterating over sets and
                     clusters of persistent objects. We also provide
                     facilities to associate constraints and triggers with
                     objects. This paper presents the linguistic facilities
                     provided in O++ and the data model it supports.},
  acknowledgement = ack-nhfb,
  generalterms    = {Algorithms; Design; Languages},
  subject         = {Information Systems --- Database Management ---
                     Systems (H.2.4): {\bf Query processing}; Information
                     Systems --- Database Management --- Systems (H.2.4);
                     Information Systems --- Database Management ---
                     Languages (H.2.3): {\bf Query languages}; Information
                     Systems --- Database Management --- Logical Design
                     (H.2.1): {\bf Data models}; Software --- Programming
                     Languages --- Language Classifications (D.3.2): {\bf
                     C++}},
}

@InProceedings{Ohori:1989:DPM,
  author =       "Atsushi Ohori and Peter Buneman and Val
                 Breazu-Tannen",
  title =        "Database programming in {Machiavelli} --- a
                 polymorphic language with static type inference",
  crossref =     "Clifford:1989:PAS",
  pages =        "46--57",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p46-ohori/p46-ohori.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p46-ohori/",
  abstract =     "Machiavelli is a polymorphically typed programming
                 language in the spirit of ML, but supports an extended
                 method of type inferencing that makes its polymorphism
                 more general and appropriate for database applications.
                 In particular, a function that selects a field of a
                 record is polymorphic in the sense that it can be
                 applied to any record which contains a field with the
                 appropriate type. When combined with a set data type
                 and database operations including join and projection,
                 this provides a natural medium for relational database
                 programming. Moreover, by implementing database objects
                 as reference types and generating the appropriate views
                 -- sets of structures with ``identity'' -- we can
                 achieve a degree of static type checking for
                 object-oriented databases.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Data manipulation languages (DML)}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Data description languages (DDL)}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Query languages}",
}

@inproceedings{Imielinski:1989:CQP,
  author          = {T. Imielinski and K. Vadaparty},
  title           = {Complexity of query processing in databases with
                     {OR-objects}},
  crossref        = {ACM:1989:PPE},
  pages           = {51--65},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/pods/73721/p51-imielinski/p51-imielinski.pdf;
                     http://www.acm.org/pubs/citations/proceedings/pods/73721/p51-imielinski/;
                     http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p51-imielinski/},
  abstract        = {If ground disjunctive facts are admitted into a
                     database the data complexity of conjunctive queries
                     grows from PTIME into CoNP with some simple examples of
                     CoNP-Complete conjunctive queries. A natural question
                     which arises in this context is whether it is possible
                     to syntactically characterize those queries which are
                     ``bad'' (i.e., CoNP-Complete) from those that are
                     ``good'' (i.e., with PTIME data complexity) given a
                     predefined ``pattern'' of disjunctions in the database.
                     In this paper, we study the data complexity of
                     conjunctive queries. We give a complete syntactic
                     characterization of CoNP-Complete conjunctive queries
                     for a class of disjunctive databases called
                     OR-Databases. Our results can be used in complexity
                     tailored design where design decisions are motivated by
                     complexity of query processing. Also, we establish that
                     a similar complete syntactic characterization for
                     disjunctive queries, with negation allowed only on base
                     predicates, would answer the open problem ``Does Graph
                     Isomorphism belong to PTIME or is it NP-Complete?''.},
  acknowledgement = ack-nhfb,
  generalterms    = {Design; Theory},
  keywords        = {design; theory},
  subject         = {{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                     Systems, Query processing. {\bf I.2.1} Computing
                     Methodologies, ARTIFICIAL INTELLIGENCE, Applications
                     and Expert Systems. {\bf H.2.0} Information Systems,
                     DATABASE MANAGEMENT, General. {\bf F.1.3} Theory of
                     Computation, COMPUTATION BY ABSTRACT DEVICES,
                     Complexity Measures and Classes, Reducibility and
                     completeness.},
}

@inproceedings{Borgida:1989:CSD,
  author          = {Alexander Borgida and Ronald J. Brachman and Deborah
                     L. McGuinness and Lori Alperin Resnick},
  title           = {{CLASSIC}: a structural data model for objects},
  crossref        = {Clifford:1989:PAS},
  pages           = {58--67},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/mod/67544/p58-borgida/p58-borgida.pdf;
                     http://www.acm.org/pubs/citations/proceedings/mod/67544/p58-borgida/},
  abstract        = {CLASSIC is a data model that encourages the
                     description of objects not only in terms of their
                     relations to other known objects, but in terms of a
                     level of intensional structure as well. The CLASSIC
                     language of {\em structured descriptions\/} permits (i)
                     partial descriptions of individuals, under an `open
                     world' assumption, (ii) answers to queries either as
                     extensional lists of values or as descriptions that
                     necessarily hold of all possible answers, and (iii) an
                     easily extensible schema, which can be accessed
                     uniformly with the data. One of the strengths of the
                     approach is that the same language plays multiple roles
                     in the processes of defining and populating the DB, as
                     well as querying and answering. \par

                     CLASSIC (for which we have a prototype main-memory
                     implementation) can actively discover new information
                     about objects from several sources: it can recognize
                     new classes under which an object falls based on a
                     description of the object, it can propagate some
                     deductive consequences of DB updates, it has simple
                     procedural recognizers, and it supports a limited form
                     of forward-chaining rules to derive new conclusions
                     about known objects. \par

                     The kind of language of descriptions and queries
                     presented here provides a new arena for the search for
                     languages that are more expressive than conventional
                     DBMS languages, but for which query processing is still
                     tractable. This space of languages differs from the
                     subsets of predicate calculus hitherto explored by
                     deductive databases.},
  acknowledgement = ack-nhfb,
  generalterms    = {Algorithms; Design; Languages},
  subject         = {Information Systems --- Database Management ---
                     Languages (H.2.3): {\bf Data manipulation languages
                     (DML)}; Information Systems --- Database Management ---
                     Languages (H.2.3): {\bf Data description languages
                     (DDL)}; Information Systems --- Database Management ---
                     Logical Design (H.2.1): {\bf Data models}; Information
                     Systems --- Database Management --- Languages (H.2.3):
                     {\bf Query languages}},
}

@inproceedings{Yuan:1989:SCQ,
  author          = {L. Y. Yuan and D.-A. Chiang},
  title           = {A sound and complete query evaluation algorithm for
                     relational databases with disjunctive information},
  crossref        = {ACM:1989:PPE},
  pages           = {66--74},
  year            = {1989},
  bibdate         = {Wed Oct 25 08:47:34 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/pods/73721/p66-yuan/p66-yuan.pdf;
                     http://www.acm.org/pubs/citations/proceedings/pods/73721/p66-yuan/;
                     http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p66-yuan/},
  acknowledgement = ack-nhfb,
  keywords        = {algorithms; design; languages; theory},
  subject         = {{\bf H.2.0} Information Systems, DATABASE MANAGEMENT,
                     General. {\bf F.3.2} Theory of Computation, LOGICS AND
                     MEANINGS OF PROGRAMS, Semantics of Programming
                     Languages, Algebraic approaches to semantics. {\bf
                     H.2.4} Information Systems, DATABASE MANAGEMENT,
                     Systems, Query processing.},
}

@inproceedings{Hou:1989:PAR,
  author          = {Wen-Chi Hou and Gultekin Ozsoyoglu and Baldeo K.
                     Taneja},
  title           = {Processing aggregate relational queries with hard time
                     constraints},
  crossref        = {Clifford:1989:PAS},
  pages           = {68--77},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/mod/67544/p68-hou/p68-hou.pdf;
                     http://www.acm.org/pubs/citations/proceedings/mod/67544/p68-hou/},
  abstract        = {We consider those database environments in which
                     queries have strict timing constraints, and develop a
                     time-constrained query evaluation methodology. For
                     aggregate relational algebra queries, we describe a
                     time constrained query evaluation algorithm. The
                     algorithm, which is implemented in our prototype DBMS,
                     iteratively samples from input relations, and evaluates
                     the associated estimators developed in our previous
                     work, until a stopping criterion (e.g., a time quota or
                     a desired error range) is satisfied. \par

                     To determine sample sizes at each stage of the
                     iteration (so that the time quota will not be
                     overspent) we need to have (a) accurate sample
                     selectivity estimations of the RA operators in the
                     query, (b) precise time cost formulas, and (c) good
                     time-control strategies. To estimate the sample
                     selectivities of RA operators, we use a runtime sample
                     selectivity estimation and improvement approach which
                     is flexible. For query time estimations, we use
                     time-cost formulas which are adaptive and precise. To
                     use the time quota efficiently, we propose statistical
                     and heuristic time-control strategies to control the
                     risk of overspending the time quota. Preliminary
                     evaluation of the implemented prototype is also
                     presented.},
  acknowledgement = ack-nhfb,
  generalterms    = {Algorithms; Design},
  subject         = {Information Systems --- Database Management ---
                     Systems (H.2.4): {\bf Query processing}; Information
                     Systems --- Database Management --- Languages (H.2.3):
                     {\bf Data manipulation languages (DML)}; Computing
                     Methodologies --- Artificial Intelligence --- Knowledge
                     Representation Formalisms and Methods (I.2.4): {\bf
                     Semantic networks}},
}

@inproceedings{Grahne:1989:HTE,
  author          = {G. Grahne},
  title           = {{Horn} tables --- an efficient tool for handling
                     incomplete information in databases},
  crossref        = {ACM:1989:PPE},
  pages           = {75--82},
  year            = {1989},
  bibdate         = {Wed Oct 25 08:47:34 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/pods/73721/p75-grahne/p75-grahne.pdf;
                     http://www.acm.org/pubs/citations/proceedings/pods/73721/p75-grahne/;
                     http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p75-grahne/},
  acknowledgement = ack-nhfb,
  keywords        = {design; theory},
  subject         = {{\bf F.3.2} Theory of Computation, LOGICS AND MEANINGS
                     OF PROGRAMS, Semantics of Programming Languages,
                     Algebraic approaches to semantics. {\bf F.4.1} Theory
                     of Computation, MATHEMATICAL LOGIC AND FORMAL
                     LANGUAGES, Mathematical Logic, Lambda calculus and
                     related systems. {\bf H.2.0} Information Systems,
                     DATABASE MANAGEMENT, General. {\bf H.2.4} Information
                     Systems, DATABASE MANAGEMENT, Systems, Query
                     processing.},
}

@inproceedings{Jagadish:1989:IHR,
  author          = {H. V. Jagadish},
  title           = {Incorporating hierarchy in a relational model of
                     data},
  crossref        = {Clifford:1989:PAS},
  pages           = {78--87},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/mod/67544/p78-jagadish/p78-jagadish.pdf;
                     http://www.acm.org/pubs/citations/proceedings/mod/67544/p78-jagadish/},
  abstract        = {We extend the relational model of data to allow
                     classes as attribute values, thereby permitting the
                     representation of hierarchies of objects. Inheritance,
                     including multiple inheritance with exceptions, is
                     clearly supported. Facts regarding classes of objects
                     can be stored and manipulated in the same way as facts
                     regarding object instances. Our model is upwards
                     compatible with the standard relational model.},
  acknowledgement = ack-nhfb,
  generalterms    = {Algorithms; Design},
  subject         = {Information Systems --- Database Management ---
                     Logical Design (H.2.1): {\bf Data models}; Information
                     Systems --- Database Management --- Languages (H.2.3):
                     {\bf Data manipulation languages (DML)}},
}

@inproceedings{Vardi:1989:ITA,
  author          = {M. Y. Vardi},
  title           = {Invited talk: automata theory for database
                     theoreticians},
  crossref        = {ACM:1989:PPE},
  pages           = {83--92},
  year            = {1989},
  bibdate         = {Wed Oct 25 08:47:34 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/pods/73721/p83-vardi/p83-vardi.pdf;
                     http://www.acm.org/pubs/citations/proceedings/pods/73721/p83-vardi/;
                     http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p83-vardi/},
  acknowledgement = ack-nhfb,
  keywords        = {design; theory},
  subject         = {{\bf F.1.1} Theory of Computation, COMPUTATION BY
                     ABSTRACT DEVICES, Models of Computation, Automata. {\bf
                     F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND
                     FORMAL LANGUAGES, Mathematical Logic, Lambda calculus
                     and related systems. {\bf H.2.0} Information Systems,
                     DATABASE MANAGEMENT, General. {\bf F.4.3} Theory of
                     Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                     Formal Languages, Algebraic language theory. {\bf
                     F.1.2} Theory of Computation, COMPUTATION BY ABSTRACT
                     DEVICES, Modes of Computation, Alternation and
                     nondeterminism.},
}

@inproceedings{Cammarata:1989:ERD,
  author          = {Stephanie Cammarata and Prasadram Ramachandra and
                     Darrell Shane},
  title           = {Extending a relational database with deferred
                     referential integrity checking and intelligent joins},
  crossref        = {Clifford:1989:PAS},
  pages           = {88--97},
  year            = {1989},
  bibdate         = {Wed Oct 25 12:40:13 MDT 2000},
  bibsource       = {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                     https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL             = {http://www.acm.org/pubs/articles/proceedings/mod/67544/p88-cammarata/p88-cammarata.pdf;
                     http://www.acm.org/pubs/citations/proceedings/mod/67544/p88-cammarata/},
  abstract        = {Interactive use of relational database management
                     systems (DBMS) requires a user to be knowledgeable
                     about the semantics of the application represented in
                     the database. In many cases, however, users are not
                     trained in the application field and are not DBMS
                     experts. Two categories of functionality are
                     problematic for such users: (1) updating a database
                     without violating integrity constraints imposed by the
                     domain and (2) using join operations to retrieve data
                     from more than one relation. We have been conducting
                     research to help an uninformed or casual user interact
                     with a relational DBMS. \par

                     This paper describes two capabilities to aid an
                     interactive database user who is neither an application
                     specialist nor a DBMS expert. We have developed
                     deferred Referential Integrity Checking (RIC) and
                     Intelligent Join (IJ) which extend the operations of a
                     relational DBMS. These facilities are made possible by
                     explicit representation of database semantics combined
                     with a relational schema. Deferred RIC is a static
                     validation procedure that checks uniqueness of tuples,
                     non-null keys, uniqueness of keys, and inclusion
                     dependencies. IJ allows a user to identify only the
                     ``target'' data which is to be retrieved without the
                     need to additionally specify ``join clauses''. In this
                     paper we present the motivation for these facilities,
                     describe the features of each, and present examples of
                     their use.},
  acknowledgement = ack-nhfb,
  generalterms    = {Algorithms; Design; Human Factors},
  subject         = {Information Systems --- Database Management ---
                     Logical Design (H.2.1): {\bf Data models}; Information
                     Systems --- Models and Principles --- User/Machine
                     Systems (H.1.2): {\bf Human factors}; Information
                     Systems --- Information Storage and Retrieval ---
                     Content Analysis and Indexing (H.3.1): {\bf
                     Dictionaries}},
}

@InProceedings{Manchanda:1989:DED,
  author =       "S. Manchanda",
  title =        "Declarative expression of deductive database updates",
  crossref =     "ACM:1989:PPE",
  pages =        "93--100",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p93-manchanda/p93-manchanda.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p93-manchanda/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p93-manchanda/",
  abstract =     "An update can be specified as a single database state
                 transition, or as a sequence of queries and database
                 state transitions. We give an extension of Datalog for
                 expressing both types of update specifications on a
                 logic database. The extension supports the simple and
                 intuitive expression of basic update operations,
                 hypothetical reasoning and update procedures. The
                 extension possesses a possible-world semantics, and a
                 sound and complete proof theory. Soundness and
                 completeness is proved by showing that an update
                 procedure can be mapped into a semantically equivalent
                 Pure Prolog program. This means that the semantic and
                 proof-theoretic results of Pure Prolog can be mapped
                 into similar results for the Datalog extension.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  keywords =     "design",
  subject =      "{\bf F.3.1} Theory of Computation, LOGICS AND MEANINGS
                 OF PROGRAMS, Specifying and Verifying and Reasoning
                 about Programs. {\bf I.2.1} Computing Methodologies,
                 ARTIFICIAL INTELLIGENCE, Applications and Expert
                 Systems. {\bf H.2.0} Information Systems, DATABASE
                 MANAGEMENT, General. {\bf F.3.2} Theory of Computation,
                 LOGICS AND MEANINGS OF PROGRAMS, Semantics of
                 Programming Languages. {\bf F.4.1} Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Mathematical Logic. {\bf D.3.2} Software, PROGRAMMING
                 LANGUAGES, Language Classifications, Prolog.",
}

@InProceedings{Copeland:1989:CHA,
  author =       "George Copeland and Tom Keller",
  title =        "A comparison of high-availability media recovery
                 techniques",
  crossref =     "Clifford:1989:PAS",
  pages =        "98--109",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p98-copeland/p98-copeland.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p98-copeland/",
  abstract =     "{\em We compare two high-availability techniques for
                 recovery from media failures in database systems. Both
                 techniques achieve high availability by having two
                 copies of all data and indexes, so that recovery is
                 immediate. ``Mirrored declustering'' spreads two copies
                 of each relation across two identical sets of disks.
                 ``Interleaved declustering'' spreads two copies of each
                 relation across one set of disks while keeping both
                 copies of each tuple on separate disks. Both techniques
                 pay the same costs of doubling storage requirements and
                 requiring updates to be applied to both copies}.
                 \par

                 {\em Mirroring offers greater simplicity and
                 universality. Recovery can be implemented at lower
                 levels of the system software (e.g., the disk
                 controller). For architectures that do not share disks
                 globally, it allows global and local cluster indexes to
                 be independent. Also, mirroring does not require data
                 to be declustered (i.e., spread over multiple disks)}.
                 \par

                 {\em Interleaved declustering offers significant
                 improvements in recovery time, mean time to loss of
                 both copies of some data, throughput during normal
                 operation, and response time during recovery. For all
                 architectures, interleaved declustering enables data to
                 be spread over twice as many disks for improved load
                 balancing. We show how tuning for interleaved
                 declustering is simplified because it is dependent only
                 on a few parameters that are usually well known for a
                 specific workload and system configuration}.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Measurement; Performance;
                 Security",
  subject =      "Information Systems --- Database Management ---
                 General (H.2.0): {\bf Security, integrity, and
                 protection**}; Information Systems --- Database
                 Management --- Physical Design (H.2.2): {\bf Recovery
                 and restart}; Information Systems --- Database
                 Management --- Database Administration (H.2.7): {\bf
                 Logging and recovery}; Data --- Files (E.5): {\bf
                 Backup/recovery}",
}

@InProceedings{Atzeni:1989:UDW,
  author =       "P. Atzeni and R. Torlone",
  title =        "Updating databases in the weak instance model",
  crossref =     "ACM:1989:PPE",
  pages =        "101--109",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p101-atzeni/p101-atzeni.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p101-atzeni/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p101-atzeni/",
  abstract =     "{\em Database updates have recently received much more
                 attention than in the past. In this trend, a solid
                 foundation is provided to the problem of updating
                 databases through interfaces based on the weak instance
                 model. Insertions and deletions of tuples are
                 considered}. \par

                 {\em As a preliminary tool, a lattice on states is
                 defined, based on the information content of the
                 various states}. \par

                 {\em Potential results of an insertion are states that
                 contain at least the information in the original state
                 and that in the new tuple. Sometimes there is no
                 potential result, and in the other cases there may be
                 many of them. We argue that the insertion is
                 deterministic if the state that contains the
                 information common to all the potential results (the
                 greatest lower bound, in the lattice framework) is
                 itself a potential result. Effective characterizations
                 for the various cases exist. A symmetric approach is
                 followed for deletions, with fewer cases, since there
                 are always potential results; determinism is
                 characterized consequently}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Theory",
  keywords =     "design; theory",
  subject =      "{\bf H.2.0} Information Systems, DATABASE MANAGEMENT,
                 General. {\bf F.1.2} Theory of Computation, COMPUTATION
                 BY ABSTRACT DEVICES, Modes of Computation, Parallelism
                 and concurrency. {\bf H.2.4} Information Systems,
                 DATABASE MANAGEMENT, Systems, Query processing. {\bf
                 I.2.1} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Applications and Expert Systems.",
}

@InProceedings{Tay:1989:AA,
  author =       "Y. C. Tay",
  title =        "Attribute agreement",
  crossref =     "ACM:1989:PPE",
  pages =        "110--119",
  year =         "1989",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p110-tay/p110-tay.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p110-tay/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p110-tay/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; theory",
  subject =      "{\bf G.2.2} Mathematics of Computing, DISCRETE
                 MATHEMATICS, Graph Theory. {\bf H.2.4} Information
                 Systems, DATABASE MANAGEMENT, Systems, Distributed
                 databases. {\bf F.2.2} Theory of Computation, ANALYSIS
                 OF ALGORITHMS AND PROBLEM COMPLEXITY, Nonnumerical
                 Algorithms and Problems, Sequencing and scheduling.",
}

@InProceedings{Schneider:1989:PEF,
  author =       "Donovan A. Schneider and David J. DeWitt",
  title =        "A performance evaluation of four parallel join
                 algorithms in a shared-nothing multiprocessor
                 environment",
  crossref =     "Clifford:1989:PAS",
  pages =        "110--121",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p110-schneider/p110-schneider.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p110-schneider/",
  abstract =     "In this paper we analyze and compare four parallel
                 join algorithms. Grace and Hybrid hash represent the
                 class of hash-based join methods, Simple hash
                 represents a looping algorithm with hashing, and our
                 last algorithm is the more traditional sort-merge. The
                 performance of each of the algorithms with different
                 tuple distribution policies, the addition of bit vector
                 filters, varying amounts of main-memory for joining,
                 and non-uniformly distributed join attribute values is
                 studied. The Hybrid hash-join algorithm is found to be
                 superior except when the join attribute values of the
                 inner relation are non-uniformly distributed and memory
                 is limited. In this case, a more conservative algorithm
                 such as the sort-merge algorithm should be used. The
                 Gamma database machine serves as the host for the
                 performance comparison.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Experimentation; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Mathematics of Computing --- Numerical Analysis ---
                 General (G.1.0): {\bf Parallel algorithms}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Concurrency}; Data --- Data Storage
                 Representations (E.2): {\bf Hash-table
                 representations}",
}

@InProceedings{Wang:1989:CCT,
  author =       "K. Wang",
  title =        "Can constant-time-maintainability be more practical?",
  crossref =     "ACM:1989:PPE",
  pages =        "120--127",
  year =         "1989",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p120-wang/p120-wang.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p120-wang/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p120-wang/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design. {\bf F.4.2} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Grammars and
                 Other Rewriting Systems, Decision problems. {\bf H.2.4}
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Query processing.",
}

@InProceedings{Carey:1989:PCC,
  author =       "Michael J. Carey and Miron Livny",
  title =        "Parallelism and concurrency control performance in
                 distributed database machines",
  crossref =     "Clifford:1989:PAS",
  pages =        "122--133",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p122-carey/p122-carey.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p122-carey/",
  abstract =     "While several distributed (or `shared nothing')
                 database machines exist in the form of prototypes or
                 commercial products, and a number of distributed
                 concurrency control algorithms are available, the
                 effect of parallelism on concurrency control
                 performance has received little attention. This paper
                 examines the interplay between parallelism and
                 transaction performance in a distributed database
                 machine context. Four alternative concurrency control
                 algorithms are considered, including two-phase locking,
                 wound-wait, basic timestamp ordering, and optimistic
                 concurrency control. Issues addressed include how
                 performance scales as a function of machine size and
                 the degree to which partitioning the database for
                 intra-transaction parallelism improves performance for
                 the different algorithms. We examine performance from
                 several perspectives, including response time,
                 throughput, and speedup, and we do so over a fairly
                 wide range of system loads. We also examine the
                 performance impact of certain important overhead
                 factors (e.g., communication and process initiation
                 costs) on the four alternative concurrency control
                 algorithms.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Experimentation; Languages; Measurement;
                 Performance; Reliability",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Mathematics of Computing --- Numerical Analysis ---
                 General (G.1.0): {\bf Parallel algorithms}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Concurrency}; Information Systems --- Database
                 Management --- Languages (H.2.3): {\bf SQL}; Computer
                 Systems Organization --- Performance of Systems (C.4):
                 {\bf Performance attributes}",
}

@InProceedings{Mannila:1989:PAF,
  author =       "H. Mannila and K.-J. R{\"a}ih{\"a}",
  title =        "Practical algorithms for finding prime attributes and
                 testing normal forms",
  crossref =     "ACM:1989:PPE",
  pages =        "128--133",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p128-mannila/p128-mannila.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p128-mannila/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p128-mannila/",
  abstract =     "Several decision problems for relational schemas with
                 functional dependencies are computationally hard. Such
                 problems include determining whether an attribute is
                 prime and testing if a schema is in normal form.
                 Algorithms for these problems are needed in database
                 design tools. The problems can be solved by trivial
                 exponential algorithms. Although the size of the
                 instance is usually given by the number of attributes
                 and hence is fairly small, such exponential algorithms
                 are not usable for all design tasks. We give algorithms
                 for these problems whose running time is polynomial in
                 the number of maximal sets not determining an attribute
                 or, equivalently, the number of generators of the
                 family of closed attribute sets. There is theoretical
                 and practical evidence that this quantity is small for
                 the schemas occurring in practice and exponential only
                 for pathological schemas. The algorithms are simple to
                 implement and fast in practice. They are in use in the
                 relational database design tool Design-By-Example.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  keywords =     "algorithms; design; theory",
  subject =      "{\bf H.2.1} Information Systems, DATABASE MANAGEMENT,
                 Logical Design, Data models. {\bf F.4.2} Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Grammars and Other Rewriting Systems, Decision
                 problems. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Computational logic.",
}

@InProceedings{Elkan:1989:DPC,
  author =       "C. Elkan",
  title =        "A decision procedure for conjunctive query
                 disjointness",
  crossref =     "ACM:1989:PPE",
  pages =        "134--139",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p134-elkan/p134-elkan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p134-elkan/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p134-elkan/",
  abstract =     "This paper presents an algorithm that decides whether
                 two conjunctive query expressions always describe
                 disjoint sets of tuples. The decision procedure solves
                 an open problem identified by Blakeley, Coburn, and
                 Larson: how to check whether an explicitly stored view
                 relation must be recomputed after an update, taking
                 into account functional dependencies. For
                 nonconjunctive queries, the disjointness problem is
                  {\em NP\/}-hard. For conjunctive queries, the time
                 complexity of the algorithm given cannot be improved
                 unless the reachability problem for directed graphs can
                 be solved in sublinear time. The algorithm is novel in
                 that it combines separate decision procedures for the
                 theory of functional dependencies and for the theory of
                 dense orders. Also, it uses tableaux that are capable
                  of representing all six comparison operators {$<$},
                  {$\leq$}, {$=$}, {$\neq$}, {$\geq$}, and {$>$}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  keywords =     "design",
  subject =      "{\bf F.4.2} Theory of Computation, MATHEMATICAL LOGIC
                 AND FORMAL LANGUAGES, Grammars and Other Rewriting
                 Systems, Decision problems. {\bf F.1.3} Theory of
                 Computation, COMPUTATION BY ABSTRACT DEVICES,
                 Complexity Measures and Classes. {\bf G.2.2}
                 Mathematics of Computing, DISCRETE MATHEMATICS, Graph
                 Theory. {\bf H.2.4} Information Systems, DATABASE
                 MANAGEMENT, Systems, Query processing.",
}

@InProceedings{Kifer:1989:FLH,
  author =       "Michael Kifer and Georg Lausen",
  title =        "{F}-logic: a higher-order language for reasoning about
                 objects, inheritance, and scheme",
  crossref =     "Clifford:1989:PAS",
  pages =        "134--146",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p134-kifer/p134-kifer.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p134-kifer/",
  abstract =     "We propose a database logic which accounts in a clean
                 declarative fashion for most of the ``object-oriented''
                 features such as object identity, complex objects,
                 inheritance, methods, etc. Furthermore, database schema
                 is part of the object language, which allows the user
                 to browse schema and data using the same declarative
                 formalism. The proposed logic has a formal semantics
                 and a sound and complete resolution-based proof
                 procedure, which makes it also computationally
                 attractive.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages; Verification",
  subject =      "Software --- Programming Languages --- Language
                 Classifications (D.3.2): {\bf Specialized application
                 languages}; Computing Methodologies --- Artificial
                 Intelligence --- Knowledge Representation Formalisms
                 and Methods (I.2.4): {\bf Semantic networks};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Data description languages (DDL)}; Software ---
                 Programming Languages --- Formal Definitions and Theory
                 (D.3.1): {\bf Semantics}",
}

@InProceedings{Ullman:1989:BBT,
  author =       "J. D. Ullman",
  title =        "Bottom-up beats top-down for datalog",
  crossref =     "ACM:1989:PPE",
  pages =        "140--149",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p140-ullman/p140-ullman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p140-ullman/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p140-ullman/",
  abstract =     "We show that for any safe datalog program {$P_1$} and
                  any query {$Q$} (predicate of {$P_1$} with some bound
                  arguments), there is another safe datalog program
                  {$P_2$} that produces the answer to {$Q$} and takes no
                  more time when evaluated by semi-naive evaluation than
                  when {$P_1$} is evaluated top-down.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Theory",
  keywords =     "algorithms; design; languages; theory",
  subject =      "{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC
                 AND FORMAL LANGUAGES, Mathematical Logic, Lambda
                 calculus and related systems. {\bf H.2.4} Information
                 Systems, DATABASE MANAGEMENT, Systems, Query
                 processing. {\bf H.2.3} Information Systems, DATABASE
                 MANAGEMENT, Languages, Datalog.",
}

@InProceedings{Hull:1989:AOO,
  author =       "Richard Hull and Jianwen Su",
  title =        "On accessing object-oriented databases: expressive
                 power, complexity, and restrictions",
  crossref =     "Clifford:1989:PAS",
  pages =        "147--158",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p147-hull/p147-hull.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p147-hull/",
  abstract =     "A formal framework for studying the expressive power
                 and complexity of OODB queries is developed. Three
                 approaches to modeling sets are articulated and
                 compared. The class of {\em regular\/} OODB schemas
                 supports the explicit representation of set-valued
                 types. Using an {\em object-based\/} semantics for
                 sets, the regular schemas correspond to most
                 implemented OODB systems in the literature; a {\em
                 value-based\/} semantics for sets is also introduced.
                 Without restrictions, both of these approaches support
                 the specification of all computable queries. Assuming
                 that the new operator is prohibited, the query language
                 of the regular OODB schemas under the object-based
                 semantics is complete in PSPACE; and under the
                 value-based semantics it has hyper-exponential
                 complexity. The third approach to modeling sets is
                 given by the {\em algebraic OODB\/} model, in which
                 multi-valued attributes rather than set-valued types
                  are supported. Method implementations can use operators
                 stemming from the relational algebra, and do not have
                 side-effects. The query language of algebraic OODBs is
                 more powerful than the relational algebra but has
                 complexity bounded by PTIME. The expressive power and
                 complexity of data access for other variations of OODBs
                 are also considered. Finally, a new relational query
                 language, called {\em algebra\/} + {\em pointwise
                 recursion}, is introduced. This is equivalent to the
                 algebraic OODB language, and can compute generalized
                 transitive closure.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Search and Retrieval (H.3.3): {\bf Query
                 formulation}; Software --- Programming Languages ---
                 Formal Definitions and Theory (D.3.1): {\bf Semantics};
                 Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Access methods};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2): {\bf
                 Algebraic approaches to semantics}",
}

@InProceedings{Seki:1989:PAT,
  author =       "H. Seki",
  title =        "On the power of {Alexander} templates",
  crossref =     "ACM:1989:PPE",
  pages =        "150--159",
  year =         "1989",
  bibdate =      "Wed Oct 25 08:47:34 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p150-seki/p150-seki.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p150-seki/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p150-seki/",
  acknowledgement = ack-nhfb,
  keywords =     "algorithms; design; languages; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf I.2.1} Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Applications
                 and Expert Systems. {\bf F.1.3} Theory of Computation,
                 COMPUTATION BY ABSTRACT DEVICES, Complexity Measures
                 and Classes. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Lambda calculus and related systems. {\bf H.2.3}
                 Information Systems, DATABASE MANAGEMENT, Languages,
                 Datalog.",
}

@InProceedings{Abiteboul:1989:OIQ,
  author =       "Serge Abiteboul and Paris C. Kanellakis",
  title =        "Object identity as a query language primitive",
  crossref =     "Clifford:1989:PAS",
  pages =        "159--173",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p159-abiteboul/p159-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p159-abiteboul/",
  abstract =     "We demonstrate the power of object identities (oid's)
                 as a database query language primitive. We develop an
                 object-based data model, whose structural part
                 generalizes most of the known complex-object data
                 models: cyclicity is allowed in both its schemas and
                 instances. Our main contribution is the operational
                 part of the data model, the query language IQL, which
                 uses oid's for three critical purposes: (1) to
                 represent data-structures with sharing and cycles, (2)
                 to manipulate sets and (3) to express any computable
                 database query. IQL can be statically type checked, can
                 be evaluated bottom-up and naturally generalizes most
                 popular rule-based database languages. The model can
                 also be extended to incorporate type inheritance,
                 without changes to IQL. Finally, we investigate an
                 analogous value-based data model, whose structural part
                 is founded on regular infinite trees and whose
                 operational part is IQL.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}; Software --- Programming
                 Languages --- Language Constructs and Features (D.3.3):
                 {\bf Modules, packages}; Software --- Programming
                 Languages --- Formal Definitions and Theory (D.3.1):
                 {\bf Semantics}",
}

@InProceedings{Sagiv:1989:SDQ,
  author =       "Y. Sagiv and M. Y. Vardi",
  title =        "Safety of {Datalog} queries over infinite databases",
  crossref =     "ACM:1989:PPE",
  pages =        "160--171",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p160-sagiv/p160-sagiv.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p160-sagiv/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p160-sagiv/",
  abstract =     "A query is {\em safe\/} with respect to a set of
                 constraints if for every database that satisfies the
                 constraints the query is guaranteed to yield a finite
                 set of answers. We study here the safety problem for
                 Datalog programs with respect to {\em finiteness
                 constraints}. We show that safety can be viewed as a
                 combination of two properties: {\em weak safety}, which
                 guarantees the finiteness of intermediate answers, and
                 {\em termination}, which guarantees the finiteness of
                 the evaluation. We prove that while weak safety is
                 decidable, termination is not. We then consider {\em
                 monadic\/} programs, i.e., programs in which all
                 intensional predicates are monadic, and show that
                 safety is decidable in polynomial time for monadic
                 programs. While we do not settle the safety problem, we
                 show that a closely related problem, the decision
                 problem for safety with respect to {\em functional
                 dependencies}, is undecidable even for monadic
                 programs.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Theory",
  keywords =     "algorithms; design; languages; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf F.4.2} Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Grammars and Other Rewriting Systems, Decision
                 problems. {\bf H.2.3} Information Systems, DATABASE
                 MANAGEMENT, Languages, Datalog.",
}

@InProceedings{Ramakrishnan:1989:PTT,
  author =       "R. Ramakrishnan and Y. Sagiv and J. D. Ullman and M.
                 Y. Vardi",
  title =        "Proof-tree transformation theorems and their
                 applications",
  crossref =     "ACM:1989:PPE",
  pages =        "172--181",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p172-ramakrishnan/p172-ramakrishnan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p172-ramakrishnan/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p172-ramakrishnan/",
  abstract =     "For certain sets of logical rules, one can demonstrate
                 that for every proof tree there is another tree proving
                 the same fact and having a special form. One technique
                 for detecting such opportunities is to reduce the
                 question to one of conjunctive-query containment. A
                 more powerful technique is to test whether one
                 conjunctive query is contained in the infinite union of
                 conjunctive queries formed by expanding a set of
                 recursive rules. We discuss two applications of these
                 techniques. First, we give tests for commutativity of
                 linear rules. When linear rules commute, we can reduce
                 the complexity of ``counting'' methods for query
                 evaluation from exponential to polynomial;
                 commutativity also implies separability in the sense of
                 Naughton. A second application is the discovery of
                 linear rules that are equivalent to given nonlinear
                 rules.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  keywords =     "algorithms; design; theory",
  subject =      "{\bf F.4.1} Theory of Computation, MATHEMATICAL LOGIC
                 AND FORMAL LANGUAGES, Mathematical Logic, Lambda
                 calculus and related systems. {\bf H.2.4} Information
                 Systems, DATABASE MANAGEMENT, Systems, Query
                 processing.",
}

@InProceedings{Chomicki:1989:RSI,
  author =       "Jan Chomicki and Tomasz Imieli{\'n}ski",
  title =        "Relational specifications of infinite query answers",
  crossref =     "Clifford:1989:PAS",
  pages =        "174--183",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p174-chomicki/p174-chomicki.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p174-chomicki/",
  abstract =     "We investigate here functional deductive databases: an
                 extension of DATALOG capable of representing infinite
                 phenomena. Rules in functional deductive databases are
                 Horn and predicates can have arbitrary unary and
                 limited $k$-ary function symbols in one fixed position.
                 This class is known to be decidable. However, least
                 fixpoints of functional rules may be infinite. We
                 present here a method to finitely represent infinite
                 least fixpoints and infinite query answers as {\em
                 relational specifications}. Relational specifications
                 consist of a finite set of tuples and of a finitely
                 specified congruence relation. Our method is applicable
                 to every domain-independent set of functional rules.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Graph algorithms}; Theory of
                 Computation --- Computation by Abstract Devices ---
                 Complexity Measures and Classes (F.1.3); Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Computing Methodologies ---
                 Artificial Intelligence --- Natural Language Processing
                 (I.2.7): {\bf DATALOG}; Computing Methodologies ---
                 Artificial Intelligence --- Deduction and Theorem
                 Proving (I.2.3)",
}

@InProceedings{Saraiya:1989:LNR,
  author =       "Y. P. Saraiya",
  title =        "Linearising nonlinear recursions in polynomial time",
  crossref =     "ACM:1989:PPE",
  pages =        "182--189",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p182-saraiya/p182-saraiya.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p182-saraiya/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p182-saraiya/",
  abstract =     "The replacement of nonlinear recursions with
                 equivalent linear recursions is a potentially useful
                 query optimization strategy, since it permits the use
                 of efficient algorithms for the evaluation of linear
                 logic programs. We show that a member of a certain
                 class of bilinear recursions is linearizable in a
                 strong sense if and only if a specific partial proof
                 tree derived from this recursion is contained in a
                 bounded number of partial proof trees generated by the
                 recursion. Further, while each such test of containment
                 between proof trees involves an exponential number of
                 conjunctive-query containment tests, we present
                 syntactic conditions on the recursion that are
                 necessary and sufficient for the containment and
                 verifiable in polynomial time.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Theory",
  keywords =     "design; languages; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf F.4.1} Theory of
                 Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                 Mathematical Logic. {\bf H.2.3} Information Systems,
                 DATABASE MANAGEMENT, Languages, Datalog.",
}

@InProceedings{Sun:1989:SIP,
  author =       "Xian-He Sun and Nabil Kamel and Lionel M. Ni",
  title =        "Solving implication problems in database
                 applications",
  crossref =     "Clifford:1989:PAS",
  pages =        "185--192",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p185-sun/p185-sun.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p185-sun/",
  abstract =     "Computing queries from derived relations, optimizing
                 queries from a group of queries, and updating
                 materialized views are important database problems and
                 have attracted much attention. One thing common to
                 these problems is their demand to quickly solve the
                 implication problem -- given two predicates {$Q_1$}
                 and {$Q_2$}, can {$Q_1$} imply {$Q_2$}? The
                 implication problem has been solved by converting it
                 into a satisfiability problem. Based on a graph
                 representation, a detailed study of the general
                 implication problem on its own is presented in this
                 paper. We proved that the general implication problem,
                 in which all six comparison operators: {$=$},
                 {$\neq$}, {$<$}, {$\leq$}, {$>$}, {$\geq$}, as well as
                 conjunctions and disjunctions are allowed, is NP-hard.
                 In the case when ``{$\neq$}'' operators are not
                 allowed in {$Q_1$} and disjunctions are not allowed in
                 {$Q_2$}, a polynomial time algorithm is proposed to
                 solve this restricted implication problem. The
                 influence of the ``{$\neq$}'' operator and
                 disjunctions are studied. Our theoretical results
                 show that for some special cases the polynomial
                 complexity algorithm can solve the implication problem
                 which allows the ``{$\neq$}'' operator or disjunctions
                 in the predicates. Necessary conditions for detecting
                 when the ``{$\neq$}'' operator and disjunctions are
                 allowed are also
                 given. These results are very useful in creating
                 heuristic methods.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Concurrency}; Theory of Computation --- Analysis of
                 Algorithms and Problem Complexity --- Numerical
                 Algorithms and Problems (F.2.1): {\bf Computations on
                 polynomials}; Mathematics of Computing --- Discrete
                 Mathematics --- Graph Theory (G.2.2): {\bf Graph
                 algorithms}",
}

@InProceedings{Brodsky:1989:IMC,
  author =       "A. Brodsky and Y. Sagiv",
  title =        "Inference of monotonicity constraints in {Datalog}
                 programs",
  crossref =     "ACM:1989:PPE",
  pages =        "190--199",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p190-brodsky/p190-brodsky.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p190-brodsky/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p190-brodsky/",
  abstract =     "Datalog (i.e., function-free logic) programs with
                 monotonicity constraints on extensional predicates are
                 considered. A monotonicity constraint states that one
                 argument of a predicate is always less than another
                 argument, according to some partial order. Relations of
                 an extensional database are required to satisfy the
                 monotonicity constraints imposed on their predicates.
                 More specifically, a partial order is defined on the
                 domain (i.e., set of constants) of the database, and
                 every tuple of each relation satisfies the monotonicity
                 constraints imposed on its predicate. An algorithm is
                 given for inferring all monotonicity constraints that
                 hold in relations of the intensional database from
                 monotonicity constraints that hold in the extensional
                 database. A complete inference algorithm is also given
                 for disjunctions of monotonicity and equality
                 constraints. It is shown that the inference of
                 monotonicity constraints in programs is a complete
                 problem for exponential time. For linear programs, this
                 problem is complete for polynomial space.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Theory",
  keywords =     "design; languages; theory",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                 Languages, Datalog. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Lambda calculus and related systems. {\bf I.2.1}
                 Computing Methodologies, ARTIFICIAL INTELLIGENCE,
                 Applications and Expert Systems. {\bf I.2.3} Computing
                 Methodologies, ARTIFICIAL INTELLIGENCE, Deduction and
                 Theorem Proving, Nonmonotonic reasoning and belief
                 revision.",
}

@InProceedings{Bry:1989:TEE,
  author =       "Fran{\c{c}}ois Bry",
  title =        "Towards an efficient evaluation of general queries:
                 quantifier and disjunction processing revisited",
  crossref =     "Clifford:1989:PAS",
  pages =        "193--204",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p193-bry/p193-bry.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p193-bry/",
  abstract =     "{\em Database applications often require to evaluate
                 queries containing quantifiers or disjunctions, e.g.,
                 for handling general integrity constraints. Existing
                 efficient methods for processing quantifiers depart
                 from the relational model as they rely on non-algebraic
                 procedures. Looking at quantified query evaluation from
                 a new angle, we propose an approach to process
                 quantifiers that makes use of relational algebra
                 operators only. Our approach performs in two phases.
                 The first phase normalizes the queries producing a
                 canonical form. This form permits to improve the
                 translation into relational algebra performed during
                 the second phase. The improved translation relies on a
                 new operator - the\/} complement-join - {\em that
                 generalizes the set difference, on algebraic
                 expressions of universal quantifiers that avoid the
                 expensive division operator in many cases, and on a
                 special processing of disjunctions by means of\/}
                 constrained outer-joins. {\em Our method achieves an
                 efficiency at least comparable with that of previous
                 proposals, better in most cases. Furthermore, it is
                 considerably simpler to implement as it completely
                 relies on relational data structures and operators}.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Standardization; Theory",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}; Information Systems --- Database
                 Management --- Languages (H.2.3): {\bf Query
                 languages}",
}

@InProceedings{Cohen:1989:WSP,
  author =       "S. Cohen and O. Wolfson",
  title =        "Why a single parallelization strategy is not enough in
                 knowledge bases",
  crossref =     "ACM:1989:PPE",
  pages =        "200--216",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p200-cohen/p200-cohen.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p200-cohen/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p200-cohen/",
  abstract =     "We argue that the appropriate parallelization strategy
                 for logic-program evaluation depends on the program
                 being evaluated. Therefore, this paper is concerned
                 with the issues of program-classification, and
                 parallelization-strategies. We propose five
                 parallelization strategies that differ based on the
                 following criteria. Their evaluation cost, the overhead
                 of communication and synchronization among processors,
                 and the programs to which they are applicable. In
                 particular, we start our study with
                 pure-parallelization, i.e., parallelization without
                 overhead. An interesting class-structure of logic
                 programs is demonstrated, when considering amenability
                 to pure-parallelization. The relationship to the NC
                 complexity class is discussed. Then we propose
                 strategies that do incur an overhead, but are optimal
                 in a sense that will be precisely defined. \par

                 This paper makes the initial steps towards a theory of
                 parallel logic-programming.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Theory",
  keywords =     "design; languages; theory",
  subject =      "{\bf I.2.1} Computing Methodologies, ARTIFICIAL
                 INTELLIGENCE, Applications and Expert Systems. {\bf
                 F.1.2} Theory of Computation, COMPUTATION BY ABSTRACT
                 DEVICES, Modes of Computation, Parallelism and
                 concurrency. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Lambda calculus and related systems. {\bf H.2.0}
                 Information Systems, DATABASE MANAGEMENT, General.",
}

@InProceedings{Ioannidis:1989:CRR,
  author =       "Yannis E. Ioannidis and Timos K. Sellis",
  title =        "Conflict resolution of rules assigning values to
                 virtual attributes",
  crossref =     "Clifford:1989:PAS",
  pages =        "205--214",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p205-ioannidis/p205-ioannidis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p205-ioannidis/",
  abstract =     "In the majority of research work done on logic
                 programming and deductive databases, it is assumed that
                 the set of rules defined by the user is {\em
                 consistent}, i.e., that no contradictory facts can be
                 inferred by the rules. In this paper, we address the
                 problem of resolving conflicts of rules that assign
                 values to virtual attributes. We devise a general
                 framework for the study of the problem, and we propose
                 an approach that subsumes all previously suggested
                 solutions. Moreover, it suggests several additional
                 solutions, which very often capture the semantics of
                 the data more accurately than the known approaches.
                 Finally, we address the issue of how to index rules so
                 that conflicts are resolved efficiently, i.e., only one
                 of the applicable rules is processed at query time.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Deduction and Theorem Proving (I.2.3): {\bf Logic
                 programming}; Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Mathematical Logic
                 (F.4.1): {\bf Logic and constraint programming};
                 Information Systems --- Database Management ---
                 Languages (H.2.3); Information Systems --- Database
                 Management --- General (H.2.0)",
}

@InProceedings{McCarthy:1989:AAD,
  author =       "Dennis McCarthy and Umeshwar Dayal",
  title =        "The architecture of an active database management
                 system",
  crossref =     "Clifford:1989:PAS",
  pages =        "215--224",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p215-mccarthy/p215-mccarthy.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p215-mccarthy/",
  abstract =     "The HiPAC project is investigating active,
                 time-constrained database management. An active DBMS is
                 one which automatically executes specified actions when
                 specified conditions arise. HiPAC has proposed
                 Event-Condition-Action (ECA) rules as a formalism for
                 active database capabilities. We have also developed an
                 execution model that specifies how these rules are
                 processed in the context of database transactions. The
                 additional functionality provided by ECA rules makes
                 new demands on the design of an active DBMS. In this
                 paper we propose an architecture for an active DBMS
                 that supports ECA rules. This architecture provides new
                 forms of interaction, in support of ECA rules, between
                 application programs and the DBMS. This leads to a new
                 paradigm for constructing database applications.",
  acknowledgement = ack-nhfb,
  generalterms = "Design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management ---
                 Database Applications (H.2.8); Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf SQL};
                 Computing Methodologies --- Artificial Intelligence ---
                 Knowledge Representation Formalisms and Methods
                 (I.2.4): {\bf Representations (procedural and
                 rule-based)}",
}

%%% NOTE (review): The following entry appears to be a duplicate of
%%% Spector:1989:ITM below, with the author's surname misspelled
%%% (``Sector'' instead of ``Spector''); the spelling mirrors the ACM
%%% source record (URL path p217-sector).  The entry is retained, and
%%% left as-is, because existing documents may cite this key.
@InProceedings{Sector:1989:ITM,
  author =       "A. Z. Sector",
  title =        "Invited talk: modular architectures for distributed
                 and databases systems",
  crossref =     "ACM:1989:PPE",
  pages =        "217--224",
  year =         "1989",
  bibdate =      "Thu Mar 12 18:48:02 MST 1998",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p217-sector/",
  acknowledgement = ack-nhfb,
  keywords =     "design; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf H.2.6} Information
                 Systems, DATABASE MANAGEMENT, Database Machines. {\bf
                 C.0} Computer Systems Organization, GENERAL, Systems
                 specification methodology. {\bf C.2.0} Computer Systems
                 Organization, COMPUTER-COMMUNICATION NETWORKS,
                 General.",
}

@InProceedings{Spector:1989:ITM,
  author =       "A. Z. Spector",
  title =        "Invited talk: modular architectures for distributed
                 and databases systems",
  crossref =     "ACM:1989:PPE",
  pages =        "217--224",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p217-spector/p217-spector.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p217-spector/",
  abstract =     "This paper describes the importance of modularity in
                 systems and lists a number of reasons why systems will
                 become increasingly modular. It describes two strawmen
                 architecture models for systems and distributed
                 databases in order to illustrate the hierarchical
                 decomposition of complex systems. The paper also
                 relates the systems model to the layering achieved in a
                 few systems familiar to the author.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Database Machines
                 (H.2.6); Computer Systems Organization --- General
                 (C.0): {\bf Systems specification methodology};
                 Computer Systems Organization ---
                 Computer-Communication Networks --- General (C.2.0)",
}

@InProceedings{Cohen:1989:CCD,
  author =       "D. Cohen",
  title =        "Compiling complex database transition triggers",
  crossref =     "Clifford:1989:PAS",
  pages =        "225--234",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p225-cohen/p225-cohen.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p225-cohen/",
  abstract =     "This paper presents a language for specifying database
                 updates, queries and rule triggers, and describes how
                 triggers can be compiled into an efficient mechanism.
                 The rule language allows specification of both state
                 and transition constraints as special cases, but is
                 more general than either. The implementation we
                 describe compiles rules and updates independently of
                 each other. Thus rules can be added or deleted without
                 recompiling any update program and vice versa.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8); Software --- Programming
                 Languages --- Processors (D.3.4): {\bf Compilers};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Data manipulation languages
                 (DML)}; Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}",
}

@InProceedings{Rotem:1989:CMH,
  author =       "D. Rotem",
  title =        "Clustered multiattribute hash files",
  crossref =     "ACM:1989:PPE",
  pages =        "225--234",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p225-rotem/p225-rotem.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p225-rotem/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p225-rotem/",
  abstract =     "Access methods for multidimensional data have
                 attracted much research interest in recent years. In
                 general, the data structures proposed for this problem
                 partition the database into a set of disk pages
                 (buckets). Access to the buckets is provided by
                 searching a directory of some type such as a tree
                 directory or inverted index or by computation of a
                 multiattribute hash function. Examples of the first
                 approach are Multidimensional B-trees[Sch82], K-D-B
                 trees[Rob81] (see also [Sam84] for a survey of these
                 methods) whereas multiattribute hashing methods are
                 described for example in [Rot74],[Aho79],[Riv76] and
                 [Ram83]. In addition, there are also hybrid methods
                 which combine hashing with a directory of some type
                 [Ore84],[Nie84], [Fag79]. \par

                 In all the work mentioned above, the performance is
                 measured in terms of the number of disk accesses made
                 to retrieve the answer without distinguishing whether
                 these are sequential or random. We argue that
                 performance measurements must consider this factor in
                 order to be realistic, especially in the single user
                 environment. Some evidence to support this claim is
                 given in [Sal88, pg. 22] with the IBM 3380 disk drive
                 as an example. For this type of disk, a comparison
                 between accessing $m$ blocks randomly and accessing a
                 contiguous cluster of $m$ blocks is made. The results
                 show that for $m$ = 10, the random access is slower by
                 a factor of about 8 than the clustered one whereas for
                 $m$ = 100 it is slower by a factor of 25. \par

                 Another motivation for this work are optical disks. In
                 this case, there is a big advantage in clustering since
                 the access mechanism on many of these drives is
                 equipped with an adjustable mirror which allows slight
                 deflections of the laser beam. This means that it may
                 be possible to read a complete cluster from a sequence
                 of adjacent tracks beneath the head with a single
                 random seek [Chri88]. \par

                 Our work is inspired by an interesting recent paper
                 [Fal86] which proposes to organize the physical layout
                 of a multiattribute hash file by encoding record
                 signatures using gray code rather than simple binary
                 code. In this way neighboring buckets contain records
                 which differ on a single bit in their signatures. It is
                 then proved that the records which form the answer to a
                 partial match query will tend to be contained in a
                 smaller number of clusters as compared with the binary
                 arrangement. It is also shown that this idea is
                 applicable to many other multiattribute hashing schemes
                 with a small amount of overhead. In addition, it can
                 improve access time to directories of grid type files,
                 extendible hashing and file methods which employ the
                 z-ordering [Ore84].",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Measurement; Performance; Theory",
  keywords =     "design; measurement; performance; theory",
  subject =      "{\bf E.2} Data, DATA STORAGE REPRESENTATIONS,
                 Hash-table representations. {\bf E.5} Data, FILES. {\bf
                 E.1} Data, DATA STRUCTURES. {\bf H.2.7} Information
                 Systems, DATABASE MANAGEMENT, Database Administration,
                 Data dictionary/directory.",
}

@InProceedings{Naughton:1989:EER,
  author =       "J. F. Naughton and R. Ramakrishnan and Y. Sagiv and J.
                 D. Ullman",
  title =        "Efficient evaluation of right-, left-, and
                 multi-linear rules",
  crossref =     "Clifford:1989:PAS",
  pages =        "235--242",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p235-naughton/p235-naughton.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p235-naughton/",
  abstract =     "We present an algorithm for the efficient evaluation
                 of a useful subset of recursive queries. Like the magic
                 sets transformation, the algorithm consists of a
                 rewriting phase followed by semi-naive bottom-up
                 evaluation of the resulting rules. We prove that on a
                 wide range of recursions, this algorithm achieves a
                 factor of {$\Theta(n)$} speedup over magic sets. Intuitively,
                 the transformations in this algorithm achieve their
                 performance by reducing the arity of the recursive
                 predicates in the transformed rules.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Theory of
                 Computation --- Mathematical Logic and Formal Languages
                 --- Formal Languages (F.4.3): {\bf Classes defined by
                 grammars or automata}",
}

@InProceedings{Johnson:1989:UBT,
  author =       "T. Johnson and D. Shasha",
  title =        "Utilization of {B}-trees with inserts, deletes and
                 modifies",
  crossref =     "ACM:1989:PPE",
  pages =        "235--246",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p235-johnson/p235-johnson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p235-johnson/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p235-johnson/",
  abstract =     "The utilization of B-tree nodes determines the number
                 of levels in the B-tree and hence its performance.
                 Until now, the only analytical aid to the determination
                 of a B-tree's utilization has been the analysis by Yao
                 and related work. Yao showed that the utilization of
                 B-tree nodes under pure inserts was 69\%. We derive
                 analytically and verify by simulation the utilization
                 of B-tree nodes constructed from $n$ inserts followed
                 by $M$ modifies (where $M \geq N$), where each modify is a
                 delete followed by an insert. Assuming that nodes only
                 merge when they are empty (the technique used in most
                 database management systems), we show that the
                 utilization is 39\% as $M$ becomes large. We extend this
                 model to a parameterized mixture of inserts and
                 modifies. Surprisingly, if the modifies are mixed with
                 just 10\% inserts, then the utilization is over 62\%. We
                 also calculated the probability of splitting and
                 merging. We derive a simple rule-of-thumb that
                 accurately calculates the probability of splitting. We
                 present two models for computing this utilization, the
                 more accurate of which remembers items inserted and
                 then deleted in a node --- we call such items ghosts.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Experimentation",
  keywords =     "algorithms; experimentation",
  subject =      "Data --- Data Structures (E.1): {\bf Trees};
                 Information Systems --- Information Storage and
                 Retrieval --- Content Analysis and Indexing (H.3.1):
                 {\bf Indexing methods}",
}

@InProceedings{Larson:1989:FSS,
  author =       "P.-A. Larson and V. Deshpande",
  title =        "A file structure supporting traversal recursion",
  crossref =     "Clifford:1989:PAS",
  pages =        "243--252",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p243-larson/p243-larson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p243-larson/",
  abstract =     "Traversal recursion is a class of recursive queries
                 where the evaluation of the query involves traversal of
                 a graph or a tree. This limited type of recursion
                 arises in many applications. In this report we
                 investigate a simple file structure that efficiently
                 supports traversal recursion over large, acyclic
                 graphs. The nodes of the graph are sorted in
                 topological order and stored in a B-tree. Hence,
                 traversal of the graph can be done in a single scan.
                 Nodes and edges can also be inserted, deleted, and
                 modified efficiently.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance; Theory",
  subject =      "Information Systems --- Information Storage and
                 Retrieval --- Information Storage (H.3.2): {\bf File
                 organization}; Mathematics of Computing --- Discrete
                 Mathematics --- Graph Theory (G.2.2): {\bf Graph
                 algorithms}; Mathematics of Computing --- Discrete
                 Mathematics --- Graph Theory (G.2.2): {\bf Trees};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}",
}

@InProceedings{Faloutsos:1989:FSK,
  author =       "C. Faloutsos and S. Roseman",
  title =        "Fractals for secondary key retrieval",
  crossref =     "ACM:1989:PPE",
  pages =        "247--252",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p247-faloutsos/p247-faloutsos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p247-faloutsos/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p247-faloutsos/",
  abstract =     "In this paper we propose the use of fractals and
                 especially the Hilbert curve, in order to design good
                 distance-preserving mappings. Such mappings improve the
                 performance of secondary-key- and spatial-access
                 methods, where multi-dimensional points have to be
                 stored on a 1-dimensional medium (e.g., disk). Good
                 clustering reduces the number of disk accesses on
                 retrieval, improving the response time. Our experiments
                 on range queries and nearest neighbor queries showed
                 that the proposed Hilbert curve achieves better
                 clustering than older methods (``bit-shuffling'', or
                 Peano curve), for every situation we tried.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Experimentation; Performance; Theory",
  keywords =     "design; experimentation; performance; theory",
  subject =      "{\bf H.2.2} Information Systems, DATABASE MANAGEMENT,
                 Physical Design, Access methods. {\bf H.3.3}
                 Information Systems, INFORMATION STORAGE AND RETRIEVAL,
                 Information Search and Retrieval. {\bf H.2.4}
                 Information Systems, DATABASE MANAGEMENT, Systems,
                 Query processing.",
}

@InProceedings{Faloutsos:1989:DUE,
  author =       "C. Faloutsos and D. Metaxas",
  title =        "Declustering using error correcting codes",
  crossref =     "ACM:1989:PPE",
  pages =        "253--258",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p253-faloutsos/p253-faloutsos.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p253-faloutsos/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p253-faloutsos/",
  abstract =     "The problem examined is to distribute a binary
                 Cartesian product file on multiple disks to maximize
                 the parallelism for partial match queries. Cartesian
                 product files appear as a result of some secondary key
                 access methods, such as the multiattribute hashing
                 [10], the grid file [6], etc. For the binary case, the
                 problem is reduced into grouping the $2^n$ binary
                 strings on $n$ bits in $m$ groups of unsimilar strings.
                 The main idea proposed in this paper is to group the
                 strings such that the group forms an Error Correcting
                 Code (ECC). This construction guarantees that the
                 strings of a given group will have large Hamming
                 distances, i.e., they will differ in many bit
                 positions. Intuitively, this should result into good
                 declustering. We briefly mention previous heuristics
                 for declustering, we describe how exactly to build a
                 declustering scheme using an ECC, and we prove a
                 theorem that gives a necessary condition for our method
                 to be optimal. Analytical results show that our method
                 is superior to older heuristics, and that it is very
                 close to the theoretical (non-tight) bound.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  keywords =     "design; performance",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf E.5} Data, FILES. {\bf
                 E.2} Data, DATA STORAGE REPRESENTATIONS, Hash-table
                 representations. {\bf H.2.2} Information Systems,
                 DATABASE MANAGEMENT, Physical Design, Access methods.",
}

@InProceedings{Agrawal:1989:EMT,
  author =       "R. Agrawal and A. Borgida and H. V. Jagadish",
  title =        "Efficient management of transitive relationships in
                 large data and knowledge bases",
  crossref =     "Clifford:1989:PAS",
  pages =        "253--262",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p253-agrawal/p253-agrawal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p253-agrawal/",
  abstract =     "We argue that accessing the transitive closure of
                 relationships is an important component of both
                 databases and knowledge representation systems in
                 Artificial Intelligence. The demands for efficient
                 access and management of large relationships motivate
                 the need for explicitly storing the transitive closure
                 in a compressed and local way, while allowing updates
                 to the base relation to be propagated incrementally. We
                 present a transitive closure compression technique,
                 based on labeling spanning trees with numeric
                 intervals, and provide both analytical and empirical
                 evidence of its efficacy, including a proof of
                 optimality.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages; Verification",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Knowledge Representation Formalisms and Methods
                 (I.2.4): {\bf Representations (procedural and
                 rule-based)}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query processing};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Graph algorithms};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2): {\bf Trees}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Query languages}",
}

@InProceedings{Weihl:1989:IRC,
  author =       "W. E. Weihl",
  title =        "The impact of recovery on concurrency control",
  crossref =     "ACM:1989:PPE",
  pages =        "259--269",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p259-weihl/p259-weihl.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p259-weihl/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p259-weihl/",
  abstract =     "It is widely recognized by practitioners that
                 concurrency control and recovery for transaction
                 systems interact in subtle ways. In most theoretical
                 work, however, concurrency control and recovery are
                 treated as separate, largely independent problems. In
                 this paper we investigate the interactions between
                 concurrency control and recovery. We consider two
                 general recovery methods for abstract data types,
                 update-in-place and deferred-update. While each
                 requires operations to conflict if they do not
                 ``commute,'' the two recovery methods require subtly
                 different notions of commutativity. We give a precise
                 characterization of the conflict relations that work
                 with each recovery method, and show that each permits
                 conflict relations that the other does not. Thus, the
                 two recovery methods place incomparable constraints on
                 concurrency control. Our analysis applies to arbitrary
                 abstract data types, including those with operations
                 that may be partial or non-deterministic.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  keywords =     "algorithms; design; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Concurrency. {\bf H.2.4} Information Systems,
                 DATABASE MANAGEMENT, Systems, Transaction processing.
                 {\bf D.3.3} Software, PROGRAMMING LANGUAGES, Language
                 Constructs and Features, Abstract data types. {\bf
                 F.1.2} Theory of Computation, COMPUTATION BY ABSTRACT
                 DEVICES, Modes of Computation, Alternation and
                 nondeterminism.",
}

@InProceedings{Gyssens:1989:GBA,
  author =       "M. Gyssens and J. Paredaens and D. van Gucht",
  title =        "A grammar-based approach towards unifying hierarchical
                 data models",
  crossref =     "Clifford:1989:PAS",
  pages =        "263--272",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p263-gyssens/p263-gyssens.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p263-gyssens/",
  abstract =     "A simple model for representing the hierarchical
                 structure of information is proposed. This model,
                 called the grammatical model, is based on trees that
                 are generated by grammars; the grammars describe the
                 hierarchy of the information represented by the trees.
                 Two transformation languages, an algebra and a
                 calculus, are presented and shown to be equally
                 expressive.",
  acknowledgement = ack-nhfb,
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Mathematics
                 of Computing --- Discrete Mathematics --- Graph Theory
                 (G.2.2): {\bf Trees}; Information Systems --- Database
                 Management --- Database Applications (H.2.8)",
}

@InProceedings{Fu:1989:CCN,
  author =       "A. Fu and T. Kameda",
  title =        "Concurrency control of nested transactions accessing
                 {B}-trees",
  crossref =     "ACM:1989:PPE",
  pages =        "270--285",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p270-fu/p270-fu.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p270-fu/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p270-fu/",
  abstract =     "This paper presents a concurrency control algorithm
                 for nested transactions accessing B-trees. It combines
                 the idea of B-link tree with that of resilient 2-phase
                 locking [Mos85b]. The I/O automaton model is used in
                 the specification and proofs of correctness of the
                 system. We define ``strongly-serially correct''
                 schedules and use this property as our correctness
                 criterion.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages; Theory",
  keywords =     "algorithms; languages; theory",
  subject =      "{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Concurrency. {\bf E.1} Data, DATA STRUCTURES,
                 Trees. {\bf H.2.4} Information Systems, DATABASE
                 MANAGEMENT, Systems, Transaction processing. {\bf E.2}
                 Data, DATA STORAGE REPRESENTATIONS, Hash-table
                 representations. {\bf F.3.2} Theory of Computation,
                 LOGICS AND MEANINGS OF PROGRAMS, Semantics of
                 Programming Languages. {\bf F.1.1} Theory of
                 Computation, COMPUTATION BY ABSTRACT DEVICES, Models of
                 Computation, Automata.",
}

@InProceedings{Colby:1989:RAQ,
  author =       "Latha S. Colby",
  title =        "A recursive algebra and query optimization for nested
                 relations",
  crossref =     "Clifford:1989:PAS",
  pages =        "273--283",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p273-colby/p273-colby.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p273-colby/",
  abstract =     "{\em The nested relational model provides a better way
                 to represent complex objects than the (flat) relational
                 model, by allowing relations to have relation-valued
                 attributes. A recursive algebra for nested relations
                 that allows tuples at all levels of nesting in a nested
                 relation to be accessed and modified without any
                 special navigational operators and without having to
                 flatten the nested relation has been developed. In this
                 algebra, the operators of the nested relational algebra
                 are extended with recursive definitions so that they
                 can be applied not only to relations but also to
                 subrelations of a relation. In this paper, we show that
                 queries are more efficient and succinct when expressed
                 in the recursive algebra than in languages that require
                 restructuring in order to access subrelations of
                 relations. We also show that most of the query
                 optimization techniques that have been developed for
                 the relational algebra can be easily extended for the
                 recursive algebra and that queries are more easily
                 optimizable when expressed in the recursive algebra
                 than when they are expressed in languages like the
                 non-recursive algebra}.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Mathematics of Computing ---
                 Numerical Analysis --- Optimization (G.1.6)",
}

@InProceedings{Tansel:1989:NHR,
  author =       "A. U. Tansel and L. Garnett",
  title =        "Nested historical relations",
  crossref =     "Clifford:1989:PAS",
  pages =        "284--294",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p284-tansel/p284-tansel.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p284-tansel/",
  abstract =     "The paper extends nested relations for managing
                 temporal variation of complex objects. It combines the
                 research in temporal databases and nested relations for
                 nontraditional database applications. The basic
                 modelling construct is a temporal atom as an attribute
                 value. A temporal atom consists of two components, a
                 value and temporal set which is a set of times denoting
                 the validity period of the value. We define algebra
                 operations for nested historical relations. Data
                 redundancy in nested historical relations is also
                 discussed and criteria for well-structured nested
                 relations are established.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Information Systems --- Database Management ---
                 Database Applications (H.2.8); Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Information Systems --- Information
                 Storage and Retrieval --- Information Storage (H.3.2):
                 {\bf File organization}",
}

@InProceedings{Bonner:1989:HDN,
  author =       "A. J. Bonner",
  title =        "Hypothetical datalog negation and linear recursion",
  crossref =     "ACM:1989:PPE",
  pages =        "286--300",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p286-bonner/p286-bonner.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p286-bonner/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p286-bonner/",
  abstract =     "This paper examines an extension of Horn logic in
                 which rules can add entries to a database
                 hypothetically. Several researchers have developed
                 logical systems along these lines, but the complexity
                 and expressibility of such logics is only now being
                 explored. It has been shown, for instance, that the
                 data-complexity of these logics is {\em PSPACE\/}
                 -complete in the function-free, predicate case. This
                 paper extends this line of research by developing
                 syntactic restrictions with lower complexity. These
                 restrictions are based on two ideas from Horn-clause
                 logic: {\em linear recursion\/} and {\em stratified
                 negation}. In particular, a notion of stratification is
                 developed in which negation-as-failure alternates with
                 linear recursion. The complexity of such rulebases
                 depends on the number of layers of stratification. The
                 result is a hierarchy of syntactic classes which
                 corresponds exactly to the polynomial-time hierarchy of
                 complexity classes. In particular, rulebases with $k$
                 strata are data-complete for {$\Sigma_k^P$}. Furthermore,
                 these rulebases provide a complete characterization of
                 the relational queries in {$\Sigma_k^P$}. That is, any
                 query whose graph is in {$\Sigma_k^P$} can be represented as
                 a set of hypothetical rules with $k$ strata. Unlike
                 other expressibility results in the literature, this
                 result does not require the data domain to be linearly
                 ordered.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance; Theory",
  keywords =     "algorithms; design; performance; theory",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                 Languages, Datalog. {\bf F.4.1} Theory of Computation,
                 MATHEMATICAL LOGIC AND FORMAL LANGUAGES, Mathematical
                 Logic, Lambda calculus and related systems. {\bf G.2.2}
                 Mathematics of Computing, DISCRETE MATHEMATICS, Graph
                 Theory. {\bf F.1.3} Theory of Computation, COMPUTATION
                 BY ABSTRACT DEVICES, Complexity Measures and Classes.
                 {\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing.",
}

@InProceedings{Orenstein:1989:RSD,
  author =       "J. A. Orenstein",
  title =        "Redundancy in spatial databases",
  crossref =     "Clifford:1989:PAS",
  pages =        "295--305",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p295-orenstein/p295-orenstein.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/67544/p295-orenstein/",
  abstract =     "Spatial objects other than points and boxes can be
                  stored in spatial indexes, but the techniques usually
                  require the use of approximations that can be
                  arbitrarily bad. This leads to poor performance and
                  highly inaccurate responses to spatial queries. The
                  situation can be improved by storing some objects in
                  the index redundantly. Most spatial indexes permit no
                  flexibility in adjusting the amount of redundancy.
                  Spatial indexes based on z-order permit this
                  flexibility. Accuracy of the query response increases
                  with redundancy (there is a ``diminishing return''
                  effect). Search time, as measured by disk accesses,
                  first decreases and then increases with redundancy.
                  There is, therefore, an optimal amount of redundancy
                  (for a given data set). The optimal use of redundancy
                  for z-order is explored through analysis of the z-order
                  search algorithm and through experiments.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Information Systems --- Database Management ---
                  Systems (H.2.4): {\bf Transaction processing};
                  Information Systems --- Database Management ---
                  Physical Design (H.2.2): {\bf Access methods};
                  Information Systems --- Database Management --- Systems
                  (H.2.4): {\bf Query processing}",
}

@InProceedings{Lakshmanan:1989:IPG,
  author =       "V. S. Lakshmanan and A. O. Mendelzon",
  title =        "Inductive pebble games and the expressive power of
                  datalog",
  crossref =     "ACM:1989:PPE",
  pages =        "301--310",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p301-lakshmanan/p301-lakshmanan.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/73721/p301-lakshmanan/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p301-lakshmanan/",
  abstract =     "As an alternative to logic-based query languages for
                  recursive queries, we are investigating a graphical
                  query language called {$G^+$}, which allows, among
                  other things, easy formulation of certain queries
                  involving simple paths in directed graphs. This led us
                  to study whether such queries are expressible in
                  DATALOG, the language of function-free Horn clauses.
                  Since some {$G^+$} queries are NP-hard, and all DATALOG
                  queries are polynomial time computable, the answer
                  appears to be negative. However, it would be
                  interesting to have proof techniques and tools for
                  settling such questions with certainty. The objective
                  of this paper is the development of one such tool, {\em
                  inductive pebble games}, based on a normal form for
                  DATALOG programs derived here, and its relationship to
                  Alternating Turing Machine computations. As an
                  application, we sketch a proof that the query ``find
                  all pairs of nodes connected by a directed simple path
                  of even length'' cannot be expressed in DATALOG.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Performance",
  keywords =     "design; languages; performance",
  subject =      "{\bf H.2.3} Information Systems, DATABASE MANAGEMENT,
                  Languages, Datalog. {\bf H.2.3} Information Systems,
                  DATABASE MANAGEMENT, Languages, Query languages. {\bf
                  F.4.1} Theory of Computation, MATHEMATICAL LOGIC AND
                  FORMAL LANGUAGES, Mathematical Logic, Lambda calculus
                  and related systems. {\bf G.2.2} Mathematics of
                  Computing, DISCRETE MATHEMATICS, Graph Theory.",
}

@InProceedings{Christodoulakis:1989:RPV,
  author =       "Stavros Christodoulakis and Daniel Alexander Ford",
  title =        "Retrieval performance versus disc space utilization on
                  {WORM} optical discs",
  crossref =     "Clifford:1989:PAS",
  pages =        "306--314",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p306-christodoulakis/p306-christodoulakis.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/67544/p306-christodoulakis/",
  abstract =     "Steady progress in the development of optical disc
                  technology over the past decade has brought it to the
                  point where it is beginning to compete directly with
                  magnetic disc technology. WORM optical discs in
                  particular, which permanently register information on
                  the disc surface, have significant advantages over
                  magnetic technology for applications that are mainly
                  archival in nature but require the ability to do
                  frequent on-line insertions. \par

                  In this paper, we propose a class of access methods
                  that use rewritable storage for the temporary buffering
                  of insertions to data sets stored on WORM optical discs
                  and we examine the relationship between the retrieval
                  performance from WORM optical discs and the utilization
                  of disc storage space when one of these organizations
                  is employed. We describe the performance trade off as
                  one of fast sequential retrieval of the contents of a
                  block versus wasted space owing to data replication. A
                  model of a specific instance of such an organization (a
                  buffered hash file scheme) is described that allows for
                  the specification of retrieval performance objectives.
                  Alternative strategies for managing data replication
                  that allow trade offs between higher consumption rates
                  and better average retrieval performance are also
                  described. We then provide an expected value analysis
                  of the amount of disc space that must be consumed on a
                  WORM disc to meet specified performance limits. The
                  analysis is general enough to allow easy extension to
                  other types of buffered file systems for WORM optical
                  discs.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance",
  subject =      "Information Systems --- Database Management ---
                  Physical Design (H.2.2): {\bf Access methods};
                  Information Systems --- Information Storage and
                  Retrieval --- Information Storage (H.3.2): {\bf File
                  organization}; Information Systems --- Information
                  Storage and Retrieval --- Information Search and
                  Retrieval (H.3.3): {\bf Retrieval models}",
}

@InProceedings{Cosmadakis:1989:FOE,
  author =       "S. S. Cosmadakis",
  title =        "On the first-order expressibility of recursive
                  queries",
  crossref =     "ACM:1989:PPE",
  pages =        "311--323",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p311-cosmadakis/p311-cosmadakis.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/73721/p311-cosmadakis/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p311-cosmadakis/",
  abstract =     "A Datalog program is {\em bounded\/} iff it is
                  equivalent to a recursion-free Datalog program. We show
                  that, for some classes of Datalog programs,
                  expressibility in first-order query languages coincides
                  with boundedness. Our results imply that testing
                  first-order expressibility is undecidable for binary
                  programs, decidable for monadic programs, and complete
                  for {$\Sigma^0_2$}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Performance",
  keywords =     "design; languages; performance",
  subject =      "Information Systems --- Database Management ---
                  Systems (H.2.4): {\bf Query processing}; Information
                  Systems --- Database Management --- Languages (H.2.3):
                  {\bf Datalog}; Information Systems --- Database
                  Management --- Languages (H.2.3): {\bf Query
                  languages}; Theory of Computation --- Mathematical
                  Logic and Formal Languages --- Mathematical Logic
                  (F.4.1)",
}

@InProceedings{Lomet:1989:AMM,
  author =       "David Lomet and Betty Salzberg",
  title =        "Access methods for multiversion data",
  crossref =     "Clifford:1989:PAS",
  pages =        "315--324",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p315-lomet/p315-lomet.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/67544/p315-lomet/",
  abstract =     "We present an access method designed to provide a
                  single integrated index structure for a versioned
                  timestamped database with a non-deletion policy.
                  Historical data (superseded versions) is stored
                  separately from current data. Our access method is
                  called the {\em Time-Split B-tree}. It is an index
                  structure based on Malcolm Easton's Write Once B-tree.
                  \par

                  The Write Once B-tree was developed for data stored
                  entirely on a Write-Once Read-Many or {\em WORM\/}
                  optical disk. The Time-Split B-tree differs from the
                  Write Once B-tree in the following ways: \par

                  Current data {\em must\/} be stored on an {\em
                  erasable\/} random-access device. \par

                  Historical data {\em may\/} be stored on {\em any\/}
                  random-access device, including WORMs, erasable optical
                  disks, and magnetic disks. The point is to use a faster
                  and more expensive device for the current data and a
                  slower cheaper device for the historical data. \par

                  The splitting policies have been changed to reduce
                  redundancy in the structure--the option of pure key
                  splits as in {$B^+$}-trees and a choice of split times
                  for time-based splits enable this performance
                  enhancement. \par

                  When data is migrated from the current to the
                  historical database, it is consolidated and appended to
                  the end of the historical database, allowing for high
                  space utilization in WORM disk sectors.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Information Systems --- Information Storage and
                  Retrieval --- Content Analysis and Indexing (H.3.1):
                  {\bf Indexing methods}; Information Systems ---
                  Information Storage and Retrieval --- Information
                  Storage (H.3.2); Information Systems --- Database
                  Management --- Physical Design (H.2.2): {\bf Access
                  methods}",
}

@InProceedings{Dublish:1989:EBA,
  author =       "P. Dublish and S. N. Maheshwari",
  title =        "Expressibility of bounded-arity fixed-point query
                  hierarchies",
  crossref =     "ACM:1989:PPE",
  pages =        "324--335",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p324-dublish/p324-dublish.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/73721/p324-dublish/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p324-dublish/",
  abstract =     "The expressibility of bounded-arity query hierarchies
                  resulting from the extension of first-order logic by
                  the least fixed-point, inductive fixed-point and
                  generalized fixed-point operators is studied. In each
                  case, it is shown that increasing the arity of the
                  predicate variable from $k$ to $k+1$ always allows
                  some more $k$-ary predicates to be expressed. Further,
                  $k$-ary inductive fixed-points are shown to be more
                  expressive than $k$-ary least fixed-points and $k$-ary
                  generalized fixed-points are shown to be more
                  expressive than $k$-ary inductive fixed-points.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Theory",
  keywords =     "algorithms; design; languages; theory",
  subject =      "Information Systems --- Database Management ---
                  Systems (H.2.4): {\bf Query processing}; Information
                  Systems --- Database Management --- Languages (H.2.3):
                  {\bf Query languages}; Theory of Computation ---
                  Mathematical Logic and Formal Languages ---
                  Mathematical Logic (F.4.1): {\bf Lambda calculus and
                  related systems}; Mathematics of Computing --- Discrete
                  Mathematics --- Graph Theory (G.2.2)",
}

@InProceedings{Shekita:1989:PET,
  author =       "Eugene J. Shekita and Michael J. Carey",
  title =        "Performance enhancement through replication in an
                  object-oriented {DBMS}",
  crossref =     "Clifford:1989:PAS",
  pages =        "325--336",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p325-shekita/p325-shekita.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/67544/p325-shekita/",
  abstract =     "In this paper we describe how replicated data can be
                  used to speed up query processing in an object-oriented
                  database system. The general idea is to use replicated
                  data to eliminate some of the functional joins that
                  would otherwise be required for query processing. We
                  refer to our technique for replicating data as {\em
                  field replication\/} because it allows individual data
                  fields to be selectively replicated. In the paper we
                  describe how field replication can be specified at the
                  data model level and we present storage-level
                  mechanisms to efficiently support it. We also present
                  an analytical cost model to give some feel for how
                  beneficial field replication can be and the
                  circumstances under which it breaks down. While field
                  replication is a relatively simple notion, the analysis
                  shows that it can provide significant performance gains
                  in many situations.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance; Theory",
  subject =      "Information Systems --- Database Management ---
                  Systems (H.2.4): {\bf Query processing}; Information
                  Systems --- Database Management --- Logical Design
                  (H.2.1): {\bf Data models}",
}

@InProceedings{Kedem:1989:RDB,
  author =       "Z. M. Kedem and A. Tuzhilin",
  title =        "Relational database behavior: utilizing relational
                  discrete event systems and models",
  crossref =     "ACM:1989:PPE",
  pages =        "336--346",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/73721/p336-kedem/p336-kedem.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/73721/p336-kedem/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p336-kedem/",
  abstract =     "Behavior of relational databases is studied within the
                  framework of {\em Relational Discrete Event Systems\/}
                  (RDESes) and {\em Models\/} (RDEMs). Production system
                  and recurrence equation RDEMs are introduced, and their
                  expressive powers are compared. Non-deterministic
                  behavior is defined for both RDEMs and the expressive
                  power of deterministic and non-deterministic production
                  rule programs is also compared. This comparison shows
                  that non-determinism increases expressive power of
                  production systems. A formal concept of a production
                  system interpreter is defined, and several specific
                  interpreters are proposed. One interpreter, called {\em
                  parallel deterministic}, is shown to be better than
                  others in many respects, including the conflict
                  resolution module of OPS5.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance; Theory",
  keywords =     "algorithms; design; performance; theory",
  subject =      "{\bf H.2.6} Information Systems, DATABASE MANAGEMENT,
                  Database Machines. {\bf F.1.2} Theory of Computation,
                  COMPUTATION BY ABSTRACT DEVICES, Modes of Computation,
                  Alternation and nondeterminism. {\bf H.2.3} Information
                  Systems, DATABASE MANAGEMENT, Languages, Query
                  languages. {\bf H.2.4} Information Systems, DATABASE
                  MANAGEMENT, Systems.",
}

@inproceedings{Kim:1989:COR,
  author =       {Won Kim and Elisa Bertino and Jorge F. Garza},
  title =        {Composite objects revisited},
  crossref =     {Clifford:1989:PAS},
  pages =        {337--347},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p337-kim/p337-kim.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/67544/p337-kim/},
  abstract =     {In object-oriented systems, an object may recursively
                  reference any number of other objects. The references,
                  however, do not capture any special relationships
                  between objects. An important semantic relationship
                  which may be superimposed on a reference is the
                  IS-PART-OF relationship between a pair of objects. A
                  set of objects related by the IS-PART-OF relationship
                  is collectively called a composite object. \par

                  An earlier paper [KIM87b] presented a model of
                  composite objects which has been implemented in the
                  ORION object-oriented database system at MCC. Although
                  the composite-object feature has been found quite
                  useful, the model suffers from a number of serious
                  shortcomings, primarily because it overloads a number
                  of orthogonal semantics on the references. In this
                  paper, first we present a more general model of
                  composite objects which does not suffer from these
                  shortcomings. Further, [KIM87b] made an important
                  contribution by exploring the use of composite objects
                  as a unit for versions, physical clustering, and
                  concurrency control. The extended model of composite
                  objects necessitates non-trivial changes to the results
                  of [KIM87b]. This paper describes the new results on
                  the use of composite objects as a unit of not only
                  versions, physical clustering and concurrency control,
                  but also authorization.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Theory},
  subject =      {Theory of Computation --- Logics and Meanings of
                  Programs --- Semantics of Programming Languages
                  (F.3.2); Information Systems --- Database Management
                  --- Logical Design (H.2.1): {\bf Data models};
                  Information Systems --- Information Storage and
                  Retrieval --- Systems and Software (H.3.4): {\bf
                  ORION}},
}

@inproceedings{Hull:1989:USI,
  author =       {R. Hull and J. Su},
  title =        {Untyped sets, invention, and computable queries},
  crossref =     {ACM:1989:PPE},
  pages =        {347--359},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/73721/p347-hull/p347-hull.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/73721/p347-hull/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p347-hull/},
  abstract =     {Conventional database query languages are considered
                  in the context of untyped sets. The algebra without
                  while has the expressive power of the typed complex
                  object algebra. The algebra plus while, and COL with
                  untyped sets (under stratified semantics or
                  inflationary semantics) have the power of the
                  computable queries. The calculus has power beyond the
                  computable queries; and is characterized using the
                  typed complex object calculus with invention. The
                  Bancilhon-Khoshafian calculus is also discussed. A
                  technical tool, called ``generic Turing machine'', is
                  introduced and used in several of the proofs.},
  acknowledgement = ack-nhfb,
  generalterms = {Design; Performance; Theory},
  keywords =     {design; performance; theory},
  subject =      {{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                  Systems, Query processing. {\bf F.3.1} Theory of
                  Computation, LOGICS AND MEANINGS OF PROGRAMS,
                  Specifying and Verifying and Reasoning about Programs,
                  Logics of programs. {\bf H.2.1} Information Systems,
                  DATABASE MANAGEMENT, Logical Design, Data models.},
}

@inproceedings{Chang:1989:EIS,
  author =       {E. E. Chang and R. H. Katz},
  title =        {Exploiting inheritance and structure semantics for
                  effective clustering and buffering in an
                  object-oriented {DBMS}},
  crossref =     {Clifford:1989:PAS},
  pages =        {348--357},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p348-chang/p348-chang.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/67544/p348-chang/},
  abstract =     {Object-oriented databases provide new kinds of data
                  semantics in terms of inheritance and structural
                  relationships. This paper examines how to use these
                  additional semantics to obtain more effective object
                  buffering and clustering. We use the information
                  collected from real-world object-oriented applications,
                  the Berkeley CAD Group's OCT design tools, as the basis
                  for a simulation model with which to investigate
                  alternative buffering and clustering strategies.
                  Observing from our measurements that real CAD
                  applications exhibit high data read to write ratios, we
                  propose a run-time clustering algorithm whose initial
                  evaluation indicates that system response time can be
                  improved by a factor of 200\% when the read/write ratio
                  is high. We have also found it useful to limit the
                  amount of I/O allowed to the clustering algorithm as it
                  examines candidate pages for clustering at run-time.
                  Basically, there is little performance distinction
                  between limiting reclustering to a few I/Os or many, so
                  a low limit on I/O appears to be acceptable. We also
                  examine, under a variety of workload assumptions,
                  context-sensitive buffer replacement policies with
                  alternative prefetching policies.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Design; Performance},
  subject =      {Computer Applications --- Computer-Aided Engineering
                  (J.6): {\bf Computer-aided design (CAD)}; Theory of
                  Computation --- Logics and Meanings of Programs ---
                  Semantics of Programming Languages (F.3.2); Software
                  --- Programming Languages --- Language Constructs and
                  Features (D.3.3): {\bf Abstract data types};
                  Information Systems --- Database Management --- Systems
                  (H.2.4)},
}

@inproceedings{Graefe:1989:DQE,
  author =       {G. Graefe and K. Ward},
  title =        {Dynamic query evaluation plans},
  crossref =     {Clifford:1989:PAS},
  pages =        {358--366},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p358-graefe/p358-graefe.pdf;
                  http://www.acm.org/pubs/citations/proceedings/mod/67544/p358-graefe/},
  abstract =     {In most database systems, a query embedded in a
                  program written in a conventional programming language
                  is optimized when the program is compiled. The query
                  optimizer must make assumptions about the values of the
                  program variables that appear as constants in the
                  query, the resources that can be committed to query
                  evaluation, and the data in the database. The
                  optimality of the resulting query evaluation plan
                  depends on the validity of these assumptions. If a
                  query evaluation plan is used repeatedly over an
                  extended period of time, it is important to determine
                  when reoptimization is necessary. Our work aims at
                  developing criteria when reoptimization is required,
                  how these criteria can be implemented efficiently, and
                  how reoptimization can be avoided by using a new
                  technique called {\em dynamic query evaluation plans}.
                  We experimentally demonstrate the need for dynamic
                  plans and outline modifications to the EXODUS optimizer
                  generator required for creating dynamic query
                  evaluation plans.},
  acknowledgement = ack-nhfb,
  generalterms = {Languages; Performance},
  subject =      {Information Systems --- Database Management ---
                  Systems (H.2.4): {\bf Query processing}; Software ---
                  Programming Languages --- Processors (D.3.4): {\bf
                  Optimization}; Software --- Programming Languages ---
                  Language Classifications (D.3.2): {\bf EXODUS}},
}

@inproceedings{Lecluse:1989:MCS,
  author =       {C. L{\'e}cluse and P. Richard},
  title =        {Modeling complex structures in object-oriented logic
                  programming},
  crossref =     {ACM:1989:PPE},
  pages =        {360--368},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                  https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/73721/p360-lecluse/p360-lecluse.pdf;
                  http://www.acm.org/pubs/citations/proceedings/pods/73721/p360-lecluse/;
                  http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p360-lecluse/},
  abstract =     {In this paper, we present a type model for
                  object-oriented databases. Most object-oriented
                  databases only provide users with flat objects whose
                  structure is a record of other objects. In order to
                  have a powerful expression power, an object-oriented
                  database should not only provide objects but also
                  complex values recursively built using the set, tuple
                  and disjunctive constructors. Our type model presents
                  two notions: that of classes whose instances are
                  objects with identity and that of types whose instances
                  are complex values. The two notions are mixed in that
                  an object is modeled as a pair containing an identifier
                  and a value, and a value is a complex structure which
                  contains objects and values. We define in this context
                  the notions of subtyping and provide a set inclusion
                  semantics for subtyping.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Design; Performance; Theory},
  keywords =     {algorithms; design; performance; theory},
  subject =      {{\bf F.3.3} Theory of Computation, LOGICS AND MEANINGS
                  OF PROGRAMS, Studies of Program Constructs, Type
                  structure. {\bf H.2.0} Information Systems, DATABASE
                  MANAGEMENT, General. {\bf F.3.2} Theory of Computation,
                  LOGICS AND MEANINGS OF PROGRAMS, Semantics of
                  Programming Languages. {\bf F.4.1} Theory of
                  Computation, MATHEMATICAL LOGIC AND FORMAL LANGUAGES,
                  Mathematical Logic, Lambda calculus and related
                  systems.},
}

@InProceedings{Swami:1989:OLJ,
  author =       {A. Swami},
  title =        {Optimization of large join queries: combining
                 heuristics and combinatorial techniques},
  crossref =     {Clifford:1989:PAS},
  pages =        {367--376},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p367-swami/p367-swami.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p367-swami/},
  abstract =     {We investigate the use of heuristics in optimizing
                 queries with a large number of joins. Examples of such
                 heuristics are the augmentation and local improvement
                 heuristics described in this paper and a heuristic
                 proposed by Krishnamurthy et al. We also study the
                 combination of these heuristics with two general
                 combinatorial optimization techniques, iterative
                 improvement and simulated annealing, that were studied
                 in a previous paper. Several interesting combinations
                 are experimentally compared. For completeness, we also
                 include simple iterative improvement and simulated
                 annealing in our experimental comparisons. We find that
                 two combinations of the augmentation heuristic and
                 iterative improvement perform the best under most
                 conditions. The results are validated using two
                 different cost models and several different synthetic
                 benchmarks.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Economics; Experimentation; Languages;
                 Performance},
  subject =      {Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Mathematics of
                 Computing --- Discrete Mathematics --- Combinatorics
                 (G.2.1): {\bf Combinatorial algorithms}},
}

@InProceedings{Chen:1989:CLC,
  author =       {W. Chen and D. S. Warren},
  title =        {{C}-logic of complex objects},
  crossref =     {ACM:1989:PPE},
  pages =        {369--378},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/73721/p369-chen/p369-chen.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p369-chen/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p369-chen/},
  abstract =     {Our objective is to have a logical framework for
                 natural representation and manipulation of complex
                 objects. We start with an analysis of semantic modeling
                 of complex objects, and attempt to understand what are
                 the fundamental aspects which need to be captured. A
                 logic, called C-logic, is then presented which provides
                 direct support for what we believe to be basic features
                 of complex objects, including object identity,
                 multi-valued labels and a dynamic notion of types.
                 C-logic has a simple first-order semantics, but it also
                 allows natural specification of complex objects and
                 gives us a framework for exploring efficient logic
                 deduction over complex objects.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Design; Performance; Theory},
  keywords =     {algorithms; design; performance; theory},
  subject =      {Theory of Computation --- Logics and Meanings of
                 Programs --- Semantics of Programming Languages
                 (F.3.2); Theory of Computation --- Mathematical Logic
                 and Formal Languages --- Mathematical Logic (F.4.1):
                 {\bf Lambda calculus and related systems}; Information
                 Systems --- Database Management --- General (H.2.0);
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1)},
}

@InProceedings{Haas:1989:EQP,
  author =       "L. M. Haas and J. C. Freytag and G. M. Lohman and H.
                 Pirahesh",
  title =        "Extensible query processing in {Starburst}",
  crossref =     "Clifford:1989:PAS",
  pages =        "377--388",
  year =         "1989",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/67544/p377-haas/p377-haas.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p377-haas/",
  abstract =     "Today's DBMSs are unable to support the increasing
                 demands of the various applications that would like to
                 use a DBMS. Each kind of application poses new
                 requirements for the DBMS. The Starburst project at
                 IBM's Almaden Research Center aims to extend relational
                 DBMS technology to bridge this gap between applications
                 and the DBMS. While providing a full function
                 relational system to enable sharing across
                 applications, Starburst will also allow (sophisticated)
                 programmers to add many kinds of extensions to the base
                 system's capabilities, including language extensions
                 (e.g., new datatypes and operations), data management
                 extensions (e.g., new access and storage methods) and
                 internal processing extensions (e.g., new join methods
                 and new query transformations). To support these
                 features, the database query language processor must be
                 very powerful and highly extensible. Starburst's
                 language processor features a powerful query language,
                 rule-based optimization and query rewrite, and an
                 execution system based on an extended relational
                 algebra. In this paper, we describe the design of
                 Starburst's query language processor and discuss the
                 ways in which the language processor can be extended to
                 achieve Starburst's goals.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Verification",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}; Theory of
                 Computation --- Analysis of Algorithms and Problem
                 Complexity --- Numerical Algorithms and Problems
                 (F.2.1)",
}

@InProceedings{Kifer:1989:LOO,
  author =       {M. Kifer and J. Wu},
  title =        {A logic for object-oriented logic programming},
  crossref =     {ACM:1989:PPE},
  pages =        {379--393},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/73721/p379-kifer/p379-kifer.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p379-kifer/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p379-kifer/},
  abstract =     {We present a logic for reasoning about complex
                 objects, which is a revised and significantly extended
                 version of Maier's O-logic [Mai86]. The logic naturally
                 supports complex objects, object identity, deduction,
                 is tolerant to inconsistent data, and has many other
                 interesting features. It elegantly combines the
                 object-oriented and value-oriented paradigms and, in
                 particular, contains all of the predicate calculus as a
                 special case. Our treatment of sets is also noteworthy:
                 it is more general than ELPS [Kup87] and COL [AbG87],
                 yet it avoids the semantic problems encountered in LDL
                 [BNS87]. The proposed logic has a sound and complete
                 resolution-based proof procedure.},
  acknowledgement = ack-nhfb,
  generalterms = {Design; Languages; Theory},
  keywords =     {design; languages; theory},
  subject =      {Theory of Computation --- Logics and Meanings of
                 Programs --- Specifying and Verifying and Reasoning
                 about Programs (F.3.1): {\bf Logics of programs};
                 Information Systems --- Database Management --- General
                 (H.2.0); Theory of Computation --- Logics and Meanings
                 of Programs --- Semantics of Programming Languages
                 (F.3.2)},
}

@InProceedings{Tang:1989:SPS,
  author =       {T. Tang and N. Natarajan},
  title =        {A static pessimistic scheme for handling replicated
                 databases},
  crossref =     {Clifford:1989:PAS},
  pages =        {389--398},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p389-tang/p389-tang.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p389-tang/},
  abstract =     {A replicated database system may partition into
                 isolated groups in the presence of node and link
                 failures. When the system has partitioned, a {\em
                 pessimistic scheme\/} maintains availability and
                 consistency of replicated data by ensuring that updates
                 occur in at most one group. A pessimistic scheme is
                 called a {\em static scheme\/} if these {\em
                 distinguished\/} groups are determined only by the
                 membership of different groups in the partitioned
                 system. In this paper, we present a new static scheme
                 that is more powerful than voting. In this scheme, the
                 set of distinguished groups, called an {\em acceptance
                 set}, is chosen at design time. To commit an update, a
                 node checks if its enclosing group is a member of this
                 acceptance set. Using an encoding scheme for groups,
                 this check is implemented very efficiently. Another
                 merit of the proposed scheme is that the problem of
                 determining an {\em optimal\/} acceptance set is
                 formulated as a {\em sparse 0-1 linear programming
                 problem}. Hence, the optimization problem can be
                 handled using the very rich class of existing
                 techniques for solving such problems. Based on our
                 experiments, we feel that this optimization approach is
                 feasible for systems containing up to 10 nodes
                 (copies).},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Verification},
  subject =      {Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases}; Theory of
                 Computation --- Analysis of Algorithms and Problem
                 Complexity --- Numerical Algorithms and Problems
                 (F.2.1); Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Concurrency}},
}

@InProceedings{Borgida:1989:TSQ,
  author =       {A. Borgida},
  title =        {Type systems for querying class hierarchies with
                 non-strict inheritance},
  crossref =     {ACM:1989:PPE},
  pages =        {394--400},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/73721/p394-borgida/p394-borgida.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/73721/p394-borgida/;
                 http://www.acm.org:80/pubs/citations/proceedings/pods/73721/p394-borgida/},
  abstract =     {Type checking at query compilation time is important
                 for both detecting programmer errors and reducing the
                 running time of queries. We have argued elsewhere [2]
                 that entity-based data management systems which support
                 class hierarchies, such as semantic data models and
                 object-oriented dbms, should not be confined to have ``
                 {\em strict inheritance\/} '' -- i.e., they should
                 permit contradictions between class specifications,
                 albeit in an explicit and controlled way. In this paper
                 we present a type system for queries manipulating
                 objects in such classes. We provide sound and complete
                 axiomatizations of the predications ``{\em is a
                 subtype of\/}'' and ``{\em expression $e$ has
                 type\/}''. The absence of strict inheritance has
                 normally been felt to preclude effective type
                 checking. We show that the problem is co-NP-hard when
                 disjoint types are admitted in the schema, but present
                 a low-order polynomial-time algorithm that determines
                 the absence of type errors in a query when the database
                 has only entities.},
  acknowledgement = ack-nhfb,
  generalterms = {Design; Languages; Theory},
  keywords =     {design; languages; theory},
  subject =      {{\bf H.2.4} Information Systems, DATABASE MANAGEMENT,
                 Systems, Query processing. {\bf D.3.4} Software,
                 PROGRAMMING LANGUAGES, Processors, Compilers. {\bf
                 F.3.3} Theory of Computation, LOGICS AND MEANINGS OF
                 PROGRAMS, Studies of Program Constructs, Type
                 structure.},
}

@InProceedings{Ellis:1989:CCG,
  author =       {C. A. Ellis and S. J. Gibbs},
  title =        {Concurrency control in groupware systems},
  crossref =     {Clifford:1989:PAS},
  pages =        {399--407},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p399-ellis/p399-ellis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p399-ellis/},
  abstract =     {Groupware systems are computer-based systems that
                 support two or more users engaged in a common task, and
                 that provide an interface to a shared environment.
                 These systems frequently require fine-granularity
                 sharing of data and fast response times. This paper
                 distinguishes real-time groupware systems from other
                 multi-user systems and discusses their concurrency
                 control requirements. An algorithm for concurrency
                 control in real-time groupware systems is then
                 presented. The advantages of this algorithm are its
                 simplicity of use and its responsiveness: users can
                 operate directly on the data without obtaining locks.
                 The algorithm must know some semantics of the
                 operations. However the algorithm's overall structure
                 is independent of the semantic information, allowing
                 the algorithm to be adapted to many situations. An
                 example application of the algorithm to group text
                 editing is given, along with a sketch of its proof of
                 correctness in this particular case. We note that the
                 behavior desired in many of these systems is
                 non-serializable.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Human Factors},
  subject =      {Information Systems --- Models and Principles ---
                 User/Machine Systems (H.1.2): {\bf Human factors};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}; Computer Applications ---
                 Computers in Other Systems (J.7): {\bf Real time}},
}

@InProceedings{Agrawal:1989:MSM,
  author =       {D. Agrawal and S. Sengupta},
  title =        {Modular synchronization in multiversion databases:
                 version control and concurrency control},
  crossref =     {Clifford:1989:PAS},
  pages =        {408--417},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p408-agrawal/p408-agrawal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p408-agrawal/},
  abstract =     {In this paper we propose a version control mechanism
                 that enhances the modularity and extensibility of
                 multiversion concurrency control algorithms. We
                 decouple the multiversion algorithms into two
                 components: version control and concurrency control.
                 This permits modular development of multiversion
                 protocols, and simplifies the task of proving the
                 correctness of these protocols. An interesting feature
                 of our framework is that the execution of read-only
                 transactions becomes completely independent of the
                 underlying concurrency control implementation. Also,
                 algorithms with the version control mechanism have
                 several advantages over most other multiversion
                 algorithms.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Verification},
  subject =      {Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Database Administration
                 (H.2.7): {\bf Logging and recovery}; Software ---
                 Software Engineering --- Distribution, Maintenance, and
                 Enhancement (D.2.7): {\bf Version control}},
}

@InProceedings{DeTroyer:1989:RTC,
  author =       {O. {De Troyer}},
  title =        {{RIDL}*: a tool for the computer-assisted engineering
                 of large databases in the presence of integrity
                 constraints},
  crossref =     {Clifford:1989:PAS},
  pages =        {418--429},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p418-de_troyer/p418-de_troyer.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p418-de_troyer/},
  abstract =     {Tools and methods that transform higher level
                 formalisms into logical database designs become very
                 important. Rarely if ever do these transformations take
                 into account integrity constraints existing in the
                 ``conceptual'' model. Yet these become essential if one
                 is forced to introduce redundancies for reasons of
                 e.g., query efficiency. We therefore adopted the Binary
                 Relationship Model (or ``NIAM'') that is rich in
                 constraints and built a flexible tool, RIDL *, that
                 graphically captures NIAM semantic networks, analyzes
                 them and then transforms them into relational designs
                 (normalized or not), under the control of a database
                 engineer assisted by a rule base. This is made possible
                 by a rule-driven implementation of a new, stepwise
                 synthesis process, and its benefits are illustrated by
                 its treatment of e.g., subtypes. RIDL * is operational
                 at several industrial sites in Europe and the U.S. on
                 sizable database projects.},
  acknowledgement = ack-nhfb,
  generalterms = {Design},
  subject =      {Software --- Software Engineering --- Design Tools and
                 Techniques (D.2.2): {\bf Programmer workbench**};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Schema and subschema}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1); Computing Methodologies --- Computer Graphics
                 --- Methodology and Techniques (I.3.6): {\bf
                 Languages}},
}

@InProceedings{Markowitz:1989:CRE,
  author =       {Victor M. Markowitz and Arie Shoshani},
  title =        {On the correctness of representing extended
                 entity-relationship structures in the relational
                 model},
  crossref =     {Clifford:1989:PAS},
  pages =        {430--439},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p430-markowitz/p430-markowitz.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p430-markowitz/},
  abstract =     {Although the relational representation of {\em
                 Entity-Relationship\/} (ER) structures gained extensive
                 coverage, scarce attention has been paid to the issue
                 of {\em correctness\/} for such representations.
                 Several mappings have been proposed for the
                 representation of both ER and extended ER (EER)
                 structures by relational schemas. The informal nature
                 of most of these proposals, however, does not allow a
                 precise evaluation of their correctness, nor a
                 comparison of the various mappings. We propose a {\em
                 canonical\/} relational representation for EER
                 structures and prove its correctness. We claim that a
                 relational schema represents correctly an EER structure
                 if it has {\em equivalent\/} information-capacity with
                 the corresponding canonical representation. \par

                 The second problem addressed by this paper is the
                 normalization of relational schemas that represent EER
                 structures. We examine the conditions required by this
                 process and show that ignoring these conditions leads
                 to erroneous analyses and inappropriate design
                 decisions. We show that, under these conditions, the
                 canonical relational representation of any
                 (unrestricted) EER structure has an
                 (information-capacity) equivalent {\em Boyce-Codd
                 Normal Form\/} schema.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Design; Performance; Verification},
  subject =      {Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Schema and subschema}; Theory of
                 Computation --- Logics and Meanings of Programs ---
                 Semantics of Programming Languages (F.3.2)},
}

@InProceedings{Navathe:1989:VPD,
  author =       {Shamkant B. Navathe and Mingyoung Ra},
  title =        {Vertical partitioning for database design: a graphical
                 algorithm},
  crossref =     {Clifford:1989:PAS},
  pages =        {440--450},
  year =         {1989},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/67544/p440-navathe/p440-navathe.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/67544/p440-navathe/},
  abstract =     {Vertical partitioning is the process of subdividing
                 the attributes of a relation or a record type, creating
                 fragments. Previous approaches have used an iterative
                 binary partitioning method which is based on clustering
                 algorithms and mathematical cost functions. In this
                 paper, however, we propose a new vertical partitioning
                 algorithm using a graphical technique. This algorithm
                 starts from the attribute affinity matrix by
                 considering it as a complete graph. Then, forming a
                 linearly connected spanning tree, it generates all
                 meaningful fragments simultaneously by considering a
                 cycle as a fragment. We show its computational
                 superiority. It provides a cleaner alternative without
                 arbitrary objective functions and provides an
                 improvement over our previous work on vertical
                 partitioning.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Design},
  subject =      {Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Mathematics
                 of Computing --- Discrete Mathematics --- Graph Theory
                 (G.2.2): {\bf Graph algorithms}; Theory of Computation
                 --- Computation by Abstract Devices --- Complexity
                 Measures and Classes (F.1.3)},
}

@InProceedings{Lipton:1990:PSE,
  author =       {Richard J. Lipton and Jeffrey F. Naughton and Donovan
                 A. Schneider},
  title =        {Practical selectivity estimation through adaptive
                 sampling},
  crossref =     {Garcia-Molina:1990:PAS},
  pages =        {1--11},
  year =         {1990},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/mod/93597/p1-lipton/p1-lipton.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p1-lipton/},
  abstract =     {Recently we have proposed an adaptive, random sampling
                 algorithm for general query size estimation. In earlier
                 work we analyzed the asymptotic efficiency and accuracy
                 of the algorithm, in this paper we investigate its
                 practicality as applied to selects and joins. First, we
                 extend our previous analysis to provide significantly
                 improved bounds on the amount of sampling necessary for
                 a given level of accuracy. Next, we provide ``sanity
                 bounds'' to deal with queries for which the underlying
                 data is extremely skewed or the query result is very
                 small. Finally, we report on the performance of the
                 estimation algorithm as implemented in a host language
                 on a commercial relational system. The results are
                 encouraging, even with this loose coupling between the
                 estimation algorithm and the DBMS.},
  acknowledgement = ack-nhfb,
  generalterms = {Algorithms; Design; Languages; Performance},
  subject =      {Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Mathematics of Computing ---
                 Probability and Statistics (G.3): {\bf Probabilistic
                 algorithms (including Monte Carlo)}},
}

@InProceedings{Kim:1990:RDO,
  author =       {Won Kim},
  title =        {Research directions in object-oriented database
                 systems},
  crossref =     {ACM:1990:PPN},
  pages =        {1--15},
  year =         {1990},
  bibdate =      {Wed Oct 25 12:40:13 MDT 2000},
  bibsource =    {http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib},
  URL =          {http://www.acm.org/pubs/articles/proceedings/pods/298514/p1-kim/p1-kim.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p1-kim/},
  abstract =     {The set of object-oriented concepts found in
                 object-oriented programming languages forms a good
                 basis for a data model for post-relational database
                 systems which will extend the domain of database
                 applications beyond conventional business data
                 processing. However, despite the high level of research
                 and development activities during the past several
                 years, there is no standard object-oriented data model,
                 and criticisms and concerns about the field still
                 remain. In this paper, I will first provide a
                 historical perspective on the emergence of
                 object-oriented database systems in order to derive a
                 definition of object-oriented database systems. I will
                 then examine a number of major challenge which remain
                 for researchers and implementors of object-oriented
                 database systems.},
  acknowledgement = ack-nhfb,
  classification = {C6110 (Systems analysis and programming); C6160Z
                 (Other DBMS)},
  corpsource =   {Microelectron. and Comput. Technol. Corp., Austin, TX,
                 USA},
  generalterms = {Design; Management; Performance; Standardization;
                 Theory},
  keywords =     {database management systems; object-oriented database
                 systems; object-oriented programming},
  sponsororg =   {SIGACT; SIGMOD; SIGART},
  subject =      {Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Object-oriented databases};
                 Information Systems --- Information Storage and
                 Retrieval --- Information Search and Retrieval (H.3.3);
                 Computing Milieux --- The Computer Industry (K.1): {\bf
                 Standards}},
  treatment =    {B Bibliography; G General Review; P Practical},
}

@InProceedings{King:1990:BAT,
  author =       "Roger King and Ali Morfeq",
  title =        "Bayan: an {Arabic} text database management system",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "12--23",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p12-king/p12-king.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p12-king/",
  abstract =     "Most existing databases lack features which allow for
                 the convenient manipulation of text. It is even more
                 difficult to use them if the text language is not based
                 on the Roman alphabet. The Arabic language is a very
                 good example of this case. Many projects have attempted
                 to use conventional database systems for Arabic data
                 manipulation (including text data), but because of
                 Arabic's many differences with English, these projects
                 have met with limited success. In the Bayan project,
                 the approach has been different. Instead of simply
                  trying to adapt an environment to Arabic, the
                 properties of the Arabic language were the starting
                 point and everything was designed to meet the needs of
                 Arabic, thus avoiding the shortcomings of other
                 projects. A text database management system was
                 designed to overcome the shortcomings of conventional
                 database management systems in manipulating text data.
                 Bayan's data model is based on an object-oriented
                 approach which helps the extensibility of the system
                 for future use. In Bayan, we designed the database with
                 the Arabic text properties in mind. We designed it to
                 support the way Arabic words are derived, classified,
                 and constructed. Furthermore, linguistic algorithms
                 (for word generation and morphological decomposition of
                 words) were designed, leading to a formalization of
                 rules of Arabic language writing and sentence
                 construction. A user interface was designed on top of
                 this environment. A new representation of the Arabic
                 characters was designed, a complete Arabic keyboard
                 layout was created, and a window-based Arabic user
                 interface was also designed.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Computing Methodologies --- Document and Text
                 Processing --- Document and Text Editing (I.7.1);
                 Information Systems --- Database Management ---
                 Database Applications (H.2.8); Computer Applications
                 --- Arts and Humanities (J.5): {\bf Linguistics};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}",
}

@InProceedings{Abiteboul:1990:MS,
  author =       "Serge Abiteboul and Paris C. Kanellakis and Emmanuel
                 Waller",
  title =        "Method schemas",
  crossref =     "ACM:1990:PPN",
  pages =        "16--27",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p16-abiteboul/p16-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p16-abiteboul/",
  abstract =     "The concept of {\em method schemas\/} is proposed as a
                 simple model for object-oriented programming with
                 features such as {\em classes with methods and
                 inheritance, method name overloading}, and {\em late
                 binding}. An important issue is to check whether a
                 given method schema can possibly lead to
                 inconsistencies in some interpretations. The
                 consistency problem for method schemas is studied. The
                 problem is shown to be undecidable in general.
                 Decidability is obtained for {\em monadic\/} and/or
                 {\em recursion-free\/} method schemas. The effect of
                 {\em covariance\/} is considered. The issues of
                 incremental consistency checking and of a sound
                 algorithm for the general case are briefly discussed.",
  acknowledgement = ack-nhfb,
  classification = "C4210 (Formal logic); C4250 (Database theory); C6110
                 (Systems analysis and programming); C6160Z (Other
                 DBMS)",
  corpsource =   "INRIA, Le Chesnay, France",
  generalterms = "Algorithms; Design; Languages; Management;
                 Performance; Theory",
  keywords =     "classes with methods and inheritance; covariance;
                 database management systems; database theory;
                 databases; decidability; incremental consistency
                 checking; late binding; method name overloading;
                 monadic method schemas; object-oriented programming;
                 recursion-free method schemas; sound algorithm;
                 undecidable",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Schema and subschema};
                 Software --- Programming Techniques --- Object-oriented
                 Programming (D.1.5)",
  treatment =    "P Practical; T Theoretical or Mathematical",
}

@InProceedings{Yu:1990:RDO,
  author =       "Lin Yu and Daniel J. Rosenkrantz",
  title =        "Representability of design objects by
                 ancestor-controlled hierarchical specifications",
  crossref =     "ACM:1990:PPN",
  pages =        "28--39",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p28-yu/p28-yu.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p28-yu/",
  abstract =     "A simple model, called a VDAG, is proposed for
                 representing hierarchically specified design data in
                 CAD database systems where there are to be alternate
                 expansions of hierarchically specified modules. The
                 model uses an ancestor-based expansion scheme to
                 control which instances of submodules are to be placed
                 within each instance of a given module. The approach is
                 aimed at reducing storage space in engineering design
                 database systems, and providing a means for designers
                 to specify alternate expansions of a module. \par

                 The expressive power of the VDAG model is investigated,
                 and the set of design forests which are VDAG-generable
                 is characterized. The problem of determining whether a
                 given design forest is VDAG-generable is shown to be
                 {\em NP\/} -complete, even when the height of the
                 forest is bounded. However, it is shown that
                 determining whether a given forest is VDAG-generable
                 and producing such a VDAG if it exists, can be
                 partitioned into a number of simpler subproblems, each
                 of which may not be too computationally difficult in
                 practice. Furthermore, for forests in a special natural
                 class that has broad applicability, a polynomial time
                 algorithm is provided that determines whether a given
                 forest is VDAG-generable, and produces such a VDAG if
                 it exists. However, we show that it is {\em NP\/} -hard
                 to produce a minimum-sized such VDAG for forests in
                 this special class, even when the height of the forest
                 is bounded.",
  acknowledgement = ack-nhfb,
  classification = "C1160 (Combinatorial mathematics); C4240
                 (Programming and algorithm theory); C4250 (Database
                 theory); C6160 (Database management systems (DBMS))",
  corpsource =   "Dept. of Comput. Sci., State Univ. of New York,
                 Albany, NY, USA",
  generalterms = "Design; Management; Performance; Theory;
                 Verification",
  keywords =     "ancestor-based expansion scheme; ancestor-controlled
                 hierarchical specifications; bounded forest height; CAD
                 database systems; computational complexity; database
                 management systems; database theory; design data
                 representation; design forests; design objects
                 representation; hierarchically specified design data;
                 hierarchically specified modules; NP-complete; NP-hard;
                 polynomial time algorithm; trees (mathematics); VDAG
                  model; VDAG-generable",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Computer Applications --- Computer-Aided Engineering
                 (J.6); Information Systems --- Database Management ---
                 Database Applications (H.2.8); Mathematics of Computing
                 --- Discrete Mathematics --- General (G.2.0)",
  treatment =    "P Practical; T Theoretical or Mathematical",
}

@InProceedings{Agrawal:1990:OGI,
  author =       "R. Agrawal and N. H. Gehani and J. Srinivasan",
  title =        "{OdeView}: the graphical interface to {Ode}",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "34--43",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p34-agrawal/p34-agrawal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p34-agrawal/",
  abstract =     "OdeView is the graphical front end for Ode, an
                 object-oriented database system and environment. Ode's
                 data model supports data encapsulation, type
                 inheritance, and complex objects. OdeView provides
                 facilities for examining the database schema (i.e., the
                 object type or class hierarchy), examining class
                 definitions, browsing objects, following chains of
                 references starting from an object, synchronized
                 browsing, displaying selected portions of objects
                 (projection), and retrieving objects with specific
                 characteristics (selection). \par

                 OdeView does not need to know about the internals of
                 Ode objects. Consequently, the internals of specific
                 classes are not hardwired into OdeView and new classes
                 can be added to the Ode database without requiring any
                 changes to or recompilation of OdeView. Just as OdeView
                 does not know about the object internals, class
                 functions (methods) for displaying objects are written
                 without knowing about the specifics of the windowing
                 software used by OdeView or the graphical user
                 interface provided by it. \par

                 In this paper, we present OdeView, and discuss its
                 design and implementation.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Computing Methodologies --- Computer Graphics ---
                 Methodology and Techniques (I.3.6): {\bf Interaction
                 techniques}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}; Information Systems --- Database Management
                 --- Systems (H.2.4); Information Systems --- Database
                 Management --- Languages (H.2.3); Software ---
                 Programming Languages --- Language Classifications
                 (D.3.2): {\bf C++}",
}

@InProceedings{Lipton:1990:QSE,
  author =       "Richard J. Lipton and Jeffrey F. Naughton",
  title =        "Query size estimation by adaptive sampling (extended
                 abstract)",
  crossref =     "ACM:1990:PPN",
  pages =        "40--46",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p40-lipton/p40-lipton.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p40-lipton/",
  abstract =     "We present an adaptive, random sampling algorithm for
                 estimating the size of general queries. The algorithm
                 can be used for any query {$Q$} over a database {$D$}
                  such that (1) for some $n$, the answer to {$Q$} can be
                  partitioned into $n$ disjoint subsets {$Q_1$}, {$Q_2$},
                  \ldots, {$Q_n$}, and (2) for $1 \leq i \leq n$, the size
                  of {$Q_i$} is bounded by some function {$b(D, Q)$}, and
                  (3) there is some algorithm by which we can compute the
                  size of {$Q_i$}, where {$i$} is chosen
                 randomly.  We consider the performance of the algorithm
                 on three special cases of the algorithm: join queries,
                 transitive closure queries, and general recursive
                 Datalog queries.",
  acknowledgement = ack-nhfb,
  annote =       "36 papers; See also 6836.1508 1990 9th for papers",
  classification = "C4250 (Database theory); C6160 (Database management
                 systems (DBMS))",
  corpsource =   "Dept. of Comput. Sci., Princeton Univ., NJ, USA",
  generalterms = "Algorithms; Design; Management; Performance; Theory;
                 Verification",
  keywords =     "ACM; adaptive sampling; database; database systems;
                 database theory; disjoint subsets; general recursive
                 Datalog queries; join queries; performance; query
                 languages; query size estimation; random sampling
                 algorithm; SIGACT; transitive closure queries",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}",
  treatment =    "T Theoretical or Mathematical",
  xxpages =      "18--25",
}

@InProceedings{Ullman:1990:IOC,
  author =       "Jeffrey D. Ullman and Mihalis Yannakakis",
  title =        "The input\slash output complexity of transitive
                 closure",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "44--53",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p44-ullman/p44-ullman.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p44-ullman/",
  abstract =     "Suppose a directed graph has its arcs stored in
                 secondary memory, and we wish to compute its transitive
                 closure, also storing the result in secondary memory.
                 We assume that an amount of main memory capable of
                 holding $s$ ``values'' is available, and that $s$ lies
                 between $n$, the number of nodes of the graph, and $e$,
                 the number of arcs. The cost measure we use for
                 algorithms is the {\em I/O complexity\/} of Kung and
                 Hong, where we count 1 every time a value is moved into
                 main memory from secondary memory, or vice versa.
                 \par

                 In the dense case, where $e$ is close to $n^2$, we show
                 that I/O equal to $(n^3 / s)$ is sufficient to compute
                 the transitive closure of an $n$ -node graph, using
                 main memory of size $s$. Moreover, it is necessary for
                 any algorithm that is ``standard,'' in a sense to be
                 defined precisely in the paper. Roughly, ``standard''
                 means that paths are constructed only by concatenating
                 arcs and previously discovered paths. This class
                 includes the usual algorithms that work for the
                 generalization of transitive closure to semiring
                 problems. For the sparse case, we show that I/O equal
                 to $(n^2 e / s)$ is sufficient, although the algorithm
                 we propose meets our definition of ``standard'' only if
                 the underlying graph is acyclic. We also show that
                 $(n^2 e / s)$ is necessary for any standard algorithm
                 in the sparse case. That settles the I/O complexity of
                 the sparse/acyclic case, for standard algorithms. It is
                 unknown whether this complexity can be achieved in the
                 sparse, cyclic case, by a standard algorithm, and it is
                 unknown whether the bound can be beaten by nonstandard
                 algorithms. \par

                 We then consider a special kind of standard algorithm,
                 in which paths are constructed only by concatenating
                 arcs and old paths, never by concatenating two old
                 paths. This restriction seems essential if we are to
                 take advantage of sparseness. Unfortunately, we show
                 that almost another factor of $n$ I/O is necessary.
                 That is, there is an algorithm in this class using I/O
                 $(n^3 e / s)$ for arbitrary sparse graphs, including
                 cyclic ones. Moreover, every algorithm in the
                 restricted class must use $(n^3 e / s / \log^3 n)$ I/O,
                 on some cyclic graphs.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Theory",
  subject =      "Theory of Computation --- Analysis of Algorithms and
                 Problem Complexity --- Nonnumerical Algorithms and
                 Problems (F.2.2): {\bf Computations on discrete
                 structures}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query processing};
                 Mathematics of Computing --- Discrete Mathematics ---
                 Graph Theory (G.2.2); Software --- Operating Systems
                 --- Storage Management (D.4.2)",
}

@InProceedings{VanGelder:1990:DCA,
  author =       "Allen {Van Gelder}",
  title =        "Deriving constraints among argument sizes in logic
                 programs (extended abstract)",
  crossref =     "ACM:1990:PPN",
  pages =        "47--60",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p47-van_gelder/p47-van_gelder.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p47-van_gelder/",
  abstract =     "In a logic program the feasible argument sizes of
                 derivable facts involving an $n$ -ary predicate are
                 viewed as a set of points in the positive orthant of
                 {\em R n}. We investigate a method of deriving
                 constraints on the feasible set in the form of a
                 polyhedral convex set in the positive orthant, which we
                 call a {\em polycone}. Faces of this polycone represent
                 inequalities proven to hold among the argument sizes.
                 These inequalities are often useful for selecting an
                 evaluation method that is guaranteed to terminate for a
                 given logic procedure. The methods may be applicable to
                 other languages in which the sizes of data structures
                 can be determined syntactically. \par

                 We introduce a {\em generalized Tucker
                 representation\/} for systems of linear equations and
                 show how needed operations on polycones are performed
                 in this representation. We prove that every polycone
                 has a unique {\em normal form\/} in this
                 representation, and give an algorithm to produce it.
                 This in turn gives a decision procedure for the
                  question of whether two sets of linear equations define
                 the same polycone. \par

                 When a predicate has several rules, the union of the
                 individual rule's polycones gives the set of feasible
                 argument size vectors for the predicate. Because this
                 set is not necessarily convex, we instead operate with
                 the smallest enclosing polycone, which is the closure
                 of the convex hull of the union. Retaining convexity is
                 one of the key features of our technique.
                 \par

                 Recursion is handled by finding a polycone that is a
                 fixpoint of a transformation that is derived from both
                 the recursive and nonrecursive rules. Some methods for
                 finding a fixpoint are presented, but there are many
                 unresolved problems in this area.",
  acknowledgement = ack-nhfb,
  classification = "C1110 (Algebra); C4140 (Linear algebra); C4210
                 (Formal logic); C4250 (Database theory)",
  corpsource =   "California Univ., Santa Cruz, CA, USA",
  generalterms = "Algorithms; Design; Management; Performance; Theory;
                 Verification",
  keywords =     "$n$-ary predicate; argument sizes; constraint
                 derivation; convex hull; convexity; database theory;
                 decision procedure; derivable facts; feasible argument
                 size vectors; fixpoint; formal logic; generalized
                 Tucker representation; linear algebra; linear
                 equations; logic programming; logic programs;
                 nonrecursive rules; polycone; polyhedral convex set;
                 positive orthant; recursive rules; transformation;
                 unique normal form",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1): {\bf
                 Logic and constraint programming}; Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3): {\bf Logic programming};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}; Computing
                 Methodologies --- Artificial Intelligence --- Problem
                 Solving, Control Methods, and Search (I.2.8): {\bf
                 Heuristic methods}",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Agrawal:1990:CSL,
  author =       "D. Agrawal and A. {El Abbadi}",
  title =        "Constrained Shared Locks for Increasing Concurrency in
                 Databases",
  crossref =     "ACM:1990:PPN",
  pages =        "53--63",
  year =         "1990",
  bibdate =      "Mon Mar 16 09:51:33 MST 1998",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/pods.bib",
  acknowledgement = ack-nhfb,
  annote =       "36 papers; See also 6836.1508 1990 9th for papers",
  keywords =     "ACM; database systems; SIGACT",
}

@InProceedings{Shen:1990:IEE,
  author =       "Yeh-Heng Shen",
  title =        "{IDLOG}: extending the expressive power of deductive
                 database languages",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "54--63",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p54-shen/p54-shen.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p54-shen/",
  abstract =     "The expressive power of pure deductive database
                 languages, such as {\em DATALOG\/} and {\em stratified
                 DATALOGS}, is limited in a sense that some useful
                 queries such as functions involving {\em aggregation\/}
                 are not definable in these languages. Our concern in
                 this paper is to provide a uniform logic framework for
                 deductive databases with greater expressive power. It
                 has been shown that with a linear ordering on the
                 domain of the database, the expressive power of some
                 database languages can be enhanced so that some
                 functions involving aggregation can be defined. Yet, a
                 direct implementation of the linear ordering in
                 deductive database languages may seem unintuitive, and
                 may not be very efficient to use in practice. We
                 propose a logic for deductive databases which employs
                 the notion of ``identifying each tuple in a relation''.
                 Through the use of these {\em tuple-identifications},
                 different linear orderings are defined as a result.
                 This intuitively explains the reason why our logic has
                 greater expressive power. The proposed logic language
                 is {\em non-deterministic\/} in nature. However,
                 non-determinism is not the real reason for the enhanced
                 expressive power. A deterministic subset of the
                  programs in this language is {\em computationally
                 complete\/} in the sense that it defines all the {\em
                 computable deterministic queries}. Although the problem
                 of deciding whether a program is in this subset is in
                 general undecidable, we do provide a rather general
                 sufficient test for identifying such programs. Also
                 discussed in this paper is an extended notion of
                 queries which allows both the input and the output of a
                 query to contain {\em interpreted constants\/} of an
                 infinite domain. We show that extended queries
                 involving aggregation can also be defined in the
                 language.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Query languages}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Theory of Computation ---
                 Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Computability theory};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Datalog}; Theory of Computation
                 --- Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Logic and constraint
                 programming}",
}

@InProceedings{Kolaitis:1990:EPD,
  author =       "Phokion G. Kolaitis and Moshe Y. Vardi",
  title =        "On the Expressive Power of {Datalog}: Tools and a Case
                 Study",
  crossref =     "ACM:1990:PPN",
  pages =        "61--71",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p61-kolaitis/p61-kolaitis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p61-kolaitis/",
  abstract =     "We study here the language Datalog({$\neq$}), which is the
                 query language obtained from Datalog by allowing
                 equalities and inequalities in the bodies of the rules.
                 We view Datalog() as a fragment of an infinitary logic
                 {$L$} and show that {$L$} can be characterized in terms
                 of certain two-person pebble games. This
                 characterization provides us with tools for
                  investigating the expressive power of Datalog({$\neq$}). As a
                 case study, we classify the expressibility of {\em
                 fixed subgraph homeomorphism\/} queries on directed
                 graphs. Fortune et al. [FHW80] classified the
                 computational complexity of these queries by
                  establishing two dichotomies, which are proper only if
                  {$P \neq NP$}. Without using any complexity-theoretic
                  assumptions, we show here that the two dichotomies are
                  indeed proper in terms of expressibility in
                  Datalog({$\neq$}).",
  acknowledgement = ack-nhfb,
  annote =       "36 papers; See also 6836.1508 1990 9th for papers",
  classification = "C1160 (Combinatorial mathematics); C4210 (Formal
                 logic); C4240 (Programming and algorithm theory); C4250
                 (Database theory)",
  corpsource =   "California Univ., Santa Cruz, CA, USA",
  generalterms = "Design; Languages; Management; Performance; Theory;
                 Verification",
  keywords =     "ACM; classify; computational complexity; database
                 systems; database theory; Datalog; Datalog(not=);
                 directed graphs; equalities; expressibility; expressive
                 power; fixed subgraph homeomorphism queries; formal
                 logic; inequalities; infinitary logic; P not=NP; query
                 language; SIGACT; tools; two-person pebble games",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Datalog}",
  treatment =    "T Theoretical or Mathematical",
  xxpages =      "110--134",
}

@InProceedings{Saraiya:1990:HPS,
  author =       "Yatin P. Saraiya",
  title =        "Hard problems for simple logic programs",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "64--73",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p64-saraiya/p64-saraiya.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p64-saraiya/",
  abstract =     "A number of optimizations have been proposed for
                 Datalog programs involving a single intensional
                 predicate (``single-IDB programs''). Examples include
                 the detection of {\em commutativity\/} and {\em
                 separability\/} ([Naug88],[RSUV89], [Ioan89a]) in
                 linear logic programs, and the detection of {\em
                 ZYT-linearizability\/} ([ZYT88], [RSUV89], [Sara89],
                 [Sara90]) in nonlinear programs. We show that the
                 natural generalizations of the commutativity and
                 ZYT-linearizability problems (respectively, the {\em
                 sequencability\/} and {\em base-case linearizability\/}
                 problems) are undecidable. Our constructions involve
                 the simulation of context-free grammars using
                 single-IDB programs that have a bounded number of
                 initialisation rules. The constructions may be used to
                 show that containment (or equivalence) is undecidable
                 for such programs, even if the programs are linear, or
                 if each program contains a single recursive rule. These
                 results tighten those of [Shmu87] and [Abit89].",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Languages",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1): {\bf
                 Logic and constraint programming}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Datalog}; Mathematics of Computing --- Numerical
                 Analysis --- Optimization (G.1.6): {\bf Linear
                 programming}; Theory of Computation --- Mathematical
                 Logic and Formal Languages --- Grammars and Other
                 Rewriting Systems (F.4.2): {\bf Grammar types};
                 Computing Methodologies --- Artificial Intelligence ---
                 Deduction and Theorem Proving (I.2.3)",
}

@InProceedings{Carey:1990:LCL,
  author =       "Michael J. Carey and Sanjay Krishnamurthi and Miron
                 Livny",
  title =        "Load control for locking: the `half-and-half'
                 approach",
  crossref =     "ACM:1990:PPN",
  pages =        "72--84",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p72-carey/p72-carey.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p72-carey/",
  abstract =     "A number of concurrency control performance studies
                 have shown that, under high levels of data contention,
                 concurrency control algorithms can exhibit thrashing
                 behavior which is detrimental to overall system
                 performance. In this paper, we present an approach to
                 eliminating thrashing in the case of two-phase locking,
                 a widely used concurrency control algorithm. Our
                 solution, which we call the `Half-and-Half' Algorithm,
                 involves monitoring the state of the DBMS in order to
                 dynamically control the multiprogramming level of the
                 system. Results from a performance study indicate that
                 the Half-and-Half algorithm can be very effective at
                 preventing thrashing under a wide range of operating
                 conditions and workloads.",
  acknowledgement = ack-nhfb,
  classification = "C6150J (Operating systems); C6160B (Distributed
                 DBMS)",
  corpsource =   "Dept. of Comput. Sci., Wisconsin Univ., Madison, WI,
                 USA",
  generalterms = "Algorithms; Design; Experimentation; Management;
                 Measurement; Performance; Theory",
  keywords =     "concurrency control; concurrency control algorithms;
                 DBMS; dynamically control; half-and-half algorithm;
                 load control; multiprogramming; multiprogramming level;
                 two-phase locking",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Physical Design (H.2.2)",
  treatment =    "P Practical",
}

@InProceedings{Wang:1990:PTD,
  author =       "Ke Wang",
  title =        "Polynomial time designs toward both {BCNF} and
                 efficient data manipulation",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "74--83",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p74-wang/p74-wang.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p74-wang/",
  abstract =     "We define the independence-reducibility based on a
                 modification of key dependencies, which has better
                 computational properties and is more practically useful
                 than the original one based on key dependencies. Using
                 this modification as a tool, we design BCNF databases
                 that are highly desirable with respect to updates
                 and/or query answering. In particular, given a set U of
                 attributes and a set F of functional dependencies over
                 U, we characterize when F can be embedded in a database
                 scheme over U that is independent and is BCNF with
                 respect to F, a polynomial time algorithm that tests
                 this characterization and produces such a database
                 scheme whenever possible is presented. The produced
                 database scheme contains the fewest possible number of
                 relation schemes. Then we show that designs of
                 embedding constant-time-maintainable BCNF schemes and
                 of embedding independence-reducible schemes share
                 exactly the same method with the above design. Finally,
                 a simple modification of this method yields a
                 polynomial time algorithm for designing embedding
                 separable BCNF schemes.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Theory of Computation --- Computation by Abstract
                 Devices --- Complexity Measures and Classes (F.1.3):
                 {\bf Reducibility and completeness}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Normal forms}",
}

@InProceedings{Atzeni:1990:EUI,
  author =       "Paolo Atzeni and Riccardo Torlone",
  title =        "Efficient updates to independent schemes in the weak
                 instance model",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "84--93",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p84-atzeni/p84-atzeni.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p84-atzeni/",
  abstract =     "{\em The weak instance model is a framework to
                 consider the relations in a database as a whole,
                 regardless of the way attributes are grouped in the
                 individual relations. Queries and updates can be
                 performed involving any set of attributes. The
                 management of updates is based on a lattice structure
                 on the set of legal states, and inconsistencies and
                 ambiguities can arise\/} \par

                 {\em In the general case, the test for inconsistency
                 and determinism may involve the application of the
                 chase algorithm to the whole database. In this paper it
                 is shown how, for the highly significant class of
                 independent schemes, updates can be handled
                 efficiently, considering only the relevant portion of
                 the database}.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Theory of Computation ---
                 Analysis of Algorithms and Problem Complexity ---
                 General (F.2.0)",
}

@InProceedings{Agrawal:1990:LCS,
  author =       "Divyakant Agrawal and Amr {El Abbadi}",
  title =        "Locks with constrained sharing (extended abstract)",
  crossref =     "ACM:1990:PPN",
  pages =        "85--93",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p85-agrawal/p85-agrawal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p85-agrawal/",
  abstract =     "In this paper, we propose a new mode for locks that
                 permits sharing in a constrained manner. We develop a
                 family of locking protocols, the strictest of which is
                 the two phase locking protocol while the most
                 permissive recognizes all conflict-preserving
                 serializable histories. This is the first locking-based
                 protocol that can recognize the entire class of
                 conflict-preserving serializable histories.",
  acknowledgement = ack-nhfb,
  classification = "B6150 (Communication system theory); C6160B
                 (Distributed DBMS)",
  corpsource =   "Dept. of Comput. Sci., California Univ., Santa
                 Barbara, CA, USA",
  generalterms = "Design; Management; Performance; Reliability;
                 Standardization; Theory; Verification",
  keywords =     "concurrency control; conflict-preserving serializable
                 histories; constrained sharing; distributed databases;
                 locks; protocols; two phase locking protocol",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Computer Systems Organization ---
                 Computer-Communication Networks --- Network Protocols
                 (C.2.2); Information Systems --- Database Management
                 --- Physical Design (H.2.2); Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf
                 Concurrency}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Distributed
                 databases}",
  treatment =    "P Practical; T Theoretical or Mathematical",
}

@InProceedings{Saraiya:1990:ETD,
  author =       "Y. P. Saraiya",
  title =        "On the Efficiency of Transforming Database Logic
                 Programs",
  crossref =     "ACM:1990:PPN",
  pages =        "87--109",
  year =         "1990",
  bibdate =      "Mon Mar 16 09:51:33 MST 1998",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/pods.bib",
  acknowledgement = ack-nhfb,
  annote =       "36 papers; See also 6836.1508 1990 9th for papers",
  keywords =     "ACM; database systems; SIGACT",
}

@InProceedings{Salzberg:1990:FDS,
  author =       "Betty Salzberg and Alex Tsukerman and Jim Gray and
                 Michael Stuewart and Susan Uren and Bonnie Vaughan",
  title =        "{FastSort}: a distributed single-input single-output
                 external sort",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "94--101",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p94-salzberg/p94-salzberg.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p94-salzberg/",
  abstract =     "External single-input single-output sorts can use
                 multiple processors each with a large tournament
                 replacement-selection in memory, and each with private
                 disks to sort an input stream in linear elapsed time.
                 Of course, increased numbers of processors, memories,
                 and disks are required as the input file size grows.
                 This paper analyzes the algorithm and reports the
                 performance of an implementation.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance",
  subject =      "Theory of Computation --- Analysis of Algorithms and
                 Problem Complexity --- Nonnumerical Algorithms and
                 Problems (F.2.2): {\bf Sorting and searching};
                 Mathematics of Computing --- Numerical Analysis ---
                 General (G.1.0): {\bf Parallel algorithms}; Theory of
                 Computation --- Computation by Abstract Devices ---
                 Modes of Computation (F.1.2): {\bf Parallelism and
                 concurrency}; Mathematics of Computing --- Discrete
                 Mathematics --- Graph Theory (G.2.2): {\bf Trees}",
}

@InProceedings{Fekete:1990:SGC,
  author =       "Alan Fekete and Nancy Lynch and William E. Weihl",
  title =        "A serialization graph construction for nested
                 transactions",
  crossref =     "ACM:1990:PPN",
  pages =        "94--108",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p94-fekete/p94-fekete.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p94-fekete/",
  abstract =     "This paper makes three contributions. First, we
                 present a proof technique that offers system designers
                 the same ease of reasoning about nested transaction
                 systems as is given by the classical theory for systems
                 without nesting, and yet can be used to verify that a
                 system satisfies the robust ``user view'' definition of
                 correctness of [10]. Second, as applications of the
                 technique, we verify the correctness of Moss'
                 read/write locking algorithm for nested transactions,
                 and of an undo logging algorithm that has not
                 previously been presented or proved for nested
                 transaction systems. Third, we make explicit the
                 assumptions used for this proof technique, assumptions
                 that are usually made {\em implicitly\/} in the
                 classical theory, and therefore we clarify the type of
                 system for which the classical theory itself can
                 reliably be used.",
  acknowledgement = ack-nhfb,
  classification = "C1160 (Combinatorial mathematics); C4250 (Database
                 theory); C6160B (Distributed DBMS)",
  corpsource =   "Sydney Univ., NSW, Australia",
  generalterms = "Algorithms; Design; Management; Performance;
                 Reliability; Theory; Verification",
  keywords =     "concurrency control; database theory; distributed
                 databases; graph theory; Moss read/write locking
                 algorithm; nested transactions; proof technique;
                 serialization graph construction; transaction
                 processing; undo logging algorithm; user view
                 definition of correctness",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing}",
  treatment =    "P Practical; T Theoretical or Mathematical",
}

@InProceedings{Graefe:1990:EPV,
  author =       "Goetz Graefe",
  title =        "Encapsulation of parallelism in the {Volcano} query
                 processing system",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "102--111",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p102-graefe/p102-graefe.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p102-graefe/",
  abstract =     "Volcano is a new dataflow query processing system we
                 have developed for database systems research and
                 education. The uniform interface between operators
                 makes Volcano extensible by new operators. All
                 operators are designed and coded as if they were meant
                 for a single-process system only. When attempting to
                 parallelize Volcano, we had to choose between two
                 models of parallelization, called here the {\em
                 bracket\/} and {\em operator\/} models. We describe the
                 reasons for not choosing the bracket model, introduce
                 the novel operator model, and provide details of
                 Volcano's {\em exchange\/} operator that parallelizes
                 all other operators. It allows intra-operator
                 parallelism on partitioned datasets and both vertical
                 and horizontal inter-operator parallelism. The exchange
                 operator encapsulates all parallelism issues and
                 therefore makes implementation of parallel database
                 algorithms significantly easier and more robust.
                 Included in this encapsulation is the translation
                 between demand-driven dataflow within processes and
                 data-driven dataflow between processes. Since the
                 interface between Volcano operators is similar to the
                 one used in ``real,'' commercial systems, the
                 techniques described here can be used to parallelize
                 other query processing engines.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Concurrency}; Theory of Computation ---
                 Computation by Abstract Devices --- Modes of
                 Computation (F.1.2): {\bf Parallelism and concurrency};
                 Mathematics of Computing --- Numerical Analysis ---
                 General (G.1.0): {\bf Parallel algorithms}",
}

@InProceedings{Weikum:1990:MLR,
  author =       "Gerhard Weikum and Christof Hasse and Peter Broessler
                 and Peter Muth",
  title =        "Multi-level recovery",
  crossref =     "ACM:1990:PPN",
  pages =        "109--123",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p109-weikum/p109-weikum.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p109-weikum/",
  abstract =     "Multi-level transactions have received considerable
                 attention as a framework for high-performance
                 concurrency control methods. An inherent property of
                 multi-level transactions is the need for compensating
                 actions, since state-based recovery methods do no
                 longer work correctly for transaction undo. The
                 resulting requirement of operation logging adds to the
                 complexity of crash recovery. In addition, multi-level
                 recovery algorithms have to take into account that
                 high-level actions are not necessarily atomic, e.g., if
                 multiple pages are updated in a single action. \par

                 In this paper, we present a recovery algorithm for
                 multi-level transactions. Unlike typical commercial
                 database systems, we have striven for simplicity rather
                 than employing special tricks. It is important to note,
                 though, that simplicity is not achieved at the expense
                 of performance. We show how a high-performance
                 multi-level recovery algorithm can be systematically
                 developed based on few fundamental principles. The
                 presented algorithm has been implemented in the DASDBS
                 database kernel system.",
  acknowledgement = ack-nhfb,
  classification = "C6160B (Distributed DBMS)",
  corpsource =   "Dept. of Comput. Sci., ETH Zurich, Switzerland",
  generalterms = "Algorithms; Design; Management; Performance; Theory",
  keywords =     "concurrency control; DASDBS database kernel system;
                 multi-level recovery algorithm; multi-level
                 transactions; transaction processing",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Physical Design (H.2.2): {\bf Recovery and restart};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}",
  treatment =    "P Practical",
}

@InProceedings{Bernstein:1990:IRR,
  author =       "Philip A. Bernstein and Meichun Hsu and Bruce Mann",
  title =        "Implementing recoverable requests using queues",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "112--122",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p112-bernstein/p112-bernstein.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p112-bernstein/",
  abstract =     "Transactions have been rigorously defined and
                 extensively studied in the database and transaction
                 processing literature, but little has been said about
                 the handling of the {\em requests\/} for transaction
                 execution in commercial TP systems, especially
                 distributed ones, managing the flow of requests is
                 often as important as executing the transactions
                 themselves. \par

                 This paper studies fault-tolerant protocols for
                 managing the flow of transaction requests between
                 clients that issue requests and servers that process
                 them. We discuss how to implement these protocols using
                 transactions and {\em recoverable queuing systems}.
                 Queuing systems are used to move requests reliably
                 between clients and servers. The protocols use queuing
                 systems to ensure that the server processes each
                 request exactly once and that a client processes each
                 reply at least once. We treat request-reply protocols
                 for single-transaction requests, for multi-transaction
                 requests, and for requests that require interaction
                 with the display after the request is submitted.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Reliability; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Distributed databases};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Transaction processing}; Mathematics of
                 Computing --- Miscellaneous (G.m): {\bf Queueing
                 theory**}",
}

@InProceedings{Solworth:1990:WOD,
  author =       "Jon A. Solworth and Cyril U. Orji",
  title =        "Write-only disk caches",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "123--132",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p123-solworth/p123-solworth.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p123-solworth/",
  abstract =     "With recent declines in the cost of semiconductor
                 memory and the increasing need for high performance I/O
                 disk systems, it makes sense to consider the design of
                 large caches. In this paper, we consider the effect of
                 caching writes. We show that cache sizes in the range
                 of a few percent allow writes to be performed at
                 negligible or no cost and independently of locality
                 considerations.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Performance",
  subject =      "Hardware --- Memory Structures --- Design Styles
                 (B.3.2): {\bf Cache memories}; Software --- Operating
                 Systems --- Storage Management (D.4.2): {\bf Secondary
                 storage}; Information Systems --- Database Management
                 --- Systems (H.2.4); Software --- Operating Systems ---
                 Process Management (D.4.1): {\bf Scheduling}",
}

@InProceedings{Tay:1990:OSM,
  author =       "Y. C. Tay",
  title =        "On the optimality of strategies for multiple joins",
  crossref =     "ACM:1990:PPN",
  pages =        "124--131",
  year =         "1990",
  bibdate =      "Wed Oct 25 08:47:37 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p124-tay/p124-tay.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p124-tay/",
  acknowledgement = ack-nhfb,
  classification = "C4250 (Database theory); C6160D (Relational DBMS)",
  corpsource =   "Dept. of Math., Nat. Univ. of Singapore, Kent Ridge,
                 Singapore",
  keywords =     "database theory; expression evaluation; multiple
                 joins; orderings; relational databases; relations;
                 searched subspace; strategy optimality; tuples",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Saraiya:1990:PTP,
  author =       "Yatin P. Saraiya",
  title =        "Polynomial-time program transformations in deductive
                 databases",
  crossref =     "ACM:1990:PPN",
  pages =        "132--144",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p132-saraiya/p132-saraiya.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p132-saraiya/",
  abstract =     "We investigate the complexity of various optimization
                 techniques for logic databases. In particular, we
                 provide polynomial-time algorithms for restricted
                 versions of common program transformations, and show
                 that a minor relaxation of these restrictions leads to
                 {\em NP\/} -hardness. To this end, we define the $k$
                 -containment problem on conjunctive queries, and show
                 that while the 2-containment problem is in {\em P}, the
                 3-containment problem is {\em NP\/} -complete. These
                 results provide a complete description of the
                 complexity of conjunctive query containment. We also
                 extend these results to provide a natural
                 characterization of certain optimization problems in
                 logic databases, such as the detection of
                 sequencability and commutativity among pairs of Linear
                 rules, the detection of 1-boundedness in sirups, and
                 the detection of ZYT-linearizability in simple
                 nonlinear recursions.",
  acknowledgement = ack-nhfb,
  classification = "C1180 (Optimisation techniques); C1230 (Artificial
                 intelligence); C4210 (Formal logic); C4240 (Programming
                 and algorithm theory); C4250 (Database theory); C6160Z
                 (Other DBMS)",
  corpsource =   "Dept. of Comput. Sci., Stanford Univ., CA, USA",
  generalterms = "Algorithms; Design; Management; Performance; Theory;
                 Verification",
  keywords =     "1-boundedness; 2-containment problem; 3-containment
                 problem; commutativity; complexity; computational
                 complexity; conjunctive query containment; database
                 management systems; database theory; deductive
                 databases; formal logic; k-containment problem;
                 knowledge based systems; linear rules; logic databases;
                 logic programming; nonlinear recursions; NP-complete;
                 NP-hardness; optimisation; optimization; polynomial
                 time program transformations; polynomial-time
                 algorithms; query languages; sequencability; sirups;
                 ZYT-linearizability",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Deduction and Theorem Proving (I.2.3); Information
                 Systems --- Database Management --- Database
                 Applications (H.2.8); Computing Methodologies ---
                 Artificial Intelligence --- Automatic Programming
                 (I.2.2): {\bf Program transformation}; Mathematics of
                 Computing --- Numerical Analysis --- Optimization
                 (G.1.6)",
  treatment =    "T Theoretical or Mathematical",
}

%%% Reviewer note: restored the colon after ``the paradigm'' in the
%%% abstract; the sentence introduces the list of issues (properties,
%%% decomposability, load balancing, negation) that follows it.
@InProceedings{Wolfson:1990:NPP,
  author =       "Ouri Wolfson and Aya Ozeri",
  title =        "A new paradigm for parallel and distributed
                 rule-processing",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "133--142",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p133-wolfson/p133-wolfson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p133-wolfson/",
  abstract =     "This paper is concerned with the parallel evaluation
                 of datalog rule programs, mainly by processors that are
                 interconnected by a communication network. We introduce
                 a paradigm, called data-reduction, for the parallel
                 evaluation of a general datalog program. Several
                 parallelization strategies discussed previously in [CW,
                 GST, W, WS] are special cases of this paradigm. The
                 paradigm parallelizes the evaluation by partitioning
                 among the processors the instantiations of the rules.
                 After presenting the paradigm, we discuss the following
                 issues, that we see fundamental for parallelization
                 strategies derived from the paradigm: properties of the
                 strategies that enable a reduction in the communication
                 overhead, decomposability, load balancing, and
                 application to programs with negation. We prove that
                 decomposability, a concept introduced previously in
                 [WS, CW], is undecidable.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Datalog}; Mathematics of
                 Computing --- Numerical Analysis --- General (G.1.0):
                 {\bf Parallel algorithms}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Concurrency}",
}

%%% Reviewer note: fixed subject--verb agreement in the abstract
%%% (``the results is'' --> ``the result is''); if the typo is verbatim
%%% from the published abstract, revert and mark it [sic].
@InProceedings{Ganguly:1990:FPP,
  author =       "Sumit Ganguly and Avi Silberschatz and Shalom Tsur",
  title =        "A framework for the parallel processing of {Datalog}
                 queries",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "143--152",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p143-ganguly/p143-ganguly.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p143-ganguly/",
  abstract =     "This paper presents several complementary methods for
                 the parallel, bottom-up evaluation of Datalog queries.
                 We introduce the notion of a {\em discriminating
                 predicate}, based on hash functions, that partitions
                 the computation between the processors in order to
                 achieve parallelism. A parallelization scheme with the
                 property of non-redundant computation (no duplication
                 of computation by processors) is then studied in
                 detail. The mapping of Datalog programs onto a network
                 of processors, such that the result is a non-redundant
                 computation, is also studied. The methods reported in
                 this paper clearly demonstrate the trade-offs between
                 redundancy and interprocessor-communication for this
                 class of problems.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Languages (H.2.3): {\bf
                 Datalog}; Information Systems --- Database Management
                 --- Systems (H.2.4): {\bf Query processing}; Theory of
                 Computation --- Computation by Abstract Devices ---
                 Modes of Computation (F.1.2): {\bf Parallelism and
                 concurrency}; Theory of Computation --- Computation by
                 Abstract Devices --- Complexity Measures and Classes
                 (F.1.3)",
}

%%% PODS 1990 entry (crossref ACM:1990:PPN; note the /pods/ path in the
%%% URL, vs. /mod/ for the SIGMOD entries in this file). No abstract is
%%% recorded for this entry; classification/keywords follow the INSPEC
%%% style used by the other PODS entries here.
@InProceedings{Plambeck:1990:STR,
  author =       "Thane Plambeck",
  title =        "Semigroup techniques in recursive query optimization",
  crossref =     "ACM:1990:PPN",
  pages =        "145--153",
  year =         "1990",
  bibdate =      "Wed Oct 25 08:47:37 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p145-plambeck/p145-plambeck.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p145-plambeck/",
  acknowledgement = ack-nhfb,
  classification = "C1110 (Algebra); C1160 (Combinatorial mathematics);
                 C1180 (Optimisation techniques); C4250 (Database
                 theory); C6160 (Database management systems (DBMS))",
  corpsource =   "Dept. of Comput. Sci., Stanford Univ., CA, USA",
  keywords =     "codify; database theory; group theory; mathematical
                 semigroup theory; optimisation; program boundedness;
                 query languages; recursive query optimization; rule
                 commutativity; set theory",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  treatment =    "T Theoretical or Mathematical",
}

%%% Reviewer note: expanded ``S. Jajodia'' to a full given name for
%%% consistency with the full-name author fields used throughout this
%%% file -- TODO confirm against the SIGMOD '90 proceedings.
@InProceedings{Kogan:1990:CCM,
  author =       "Boris Kogan and Sushil Jajodia",
  title =        "Concurrency control in multilevel-secure databases
                 based on replicated architecture",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "153--162",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p153-kogan/p153-kogan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p153-kogan/",
  abstract =     "In a multilevel secure database management system
                 based on the {\em replicated\/} architecture, there is
                 a separate database management system to manage data at
                 or below each security level, and lower level data are
                 replicated in all databases containing higher level
                 data. In this paper, we address the open issue of
                 concurrency control in such a system. We give a secure
                 protocol that guarantees one-copy serializability of
                 concurrent transaction executions and can be
                 implemented in such a way that the size of the trusted
                 code (including the code required for concurrency and
                 recovery) is small.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Security",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Information Systems
                 --- Database Management --- Systems (H.2.4): {\bf
                 Transaction processing}; Information Systems ---
                 Database Management --- General (H.2.0): {\bf Security,
                 integrity, and protection**}; Computer Applications ---
                 Computers in Other Systems (J.7): {\bf Military}",
}

%%% PODS 1990 entry (crossref ACM:1990:PPN). Fields are consistent with
%%% the surrounding entries; the acm.org URLs use the legacy proceedings
%%% tree and may no longer resolve -- verify before relying on them.
@InProceedings{Elkan:1990:ILD,
  author =       "Charles Elkan",
  title =        "Independence of logic database queries and updates",
  crossref =     "ACM:1990:PPN",
  pages =        "154--160",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p154-elkan/p154-elkan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p154-elkan/",
  abstract =     "A query is independent of an update if executing the
                 update cannot change the result of evaluating the
                 query. The theorems of this paper give methods for
                 proving independence in concrete cases, taking into
                 account integrity constraints, recursive rules, and
                 arbitrary queries. First we define the notion of
                 independence model-theoretically, and we prove basic
                 properties of the concept. Then we provide
                 proof-theoretic conditions for a conjunctive query to
                 be independent of an update. Finally, we prove correct
                 an induction scheme for showing that a recursive query
                 is independent of an update.",
  acknowledgement = ack-nhfb,
  classification = "C4210 (Formal logic); C4250 (Database theory); C6160
                 (Database management systems (DBMS))",
  corpsource =   "Dept. of Comput. Sci., Toronto Univ., Ont., Canada",
  generalterms = "Design; Management; Performance; Theory;
                 Verification",
  keywords =     "arbitrary queries; conjunctive query; database theory;
                 formal logic; independence; induction; integrity
                 constraints; logic database queries; logic database
                 update; proof-theoretic conditions; query languages;
                 recursive query; recursive rules",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Deduction and Theorem Proving (I.2.3): {\bf Logic
                 programming}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Query processing};
                 Information Systems --- Database Management ---
                 Database Applications (H.2.8)",
  treatment =    "T Theoretical or Mathematical",
}

%%% Reviewer note: braced {Datalog} in the title so sentence-casing
%%% styles cannot downcase the proper noun; matches the brace
%%% protection already used in the Ganguly:1990:FPP title.
@InProceedings{Ross:1990:MSM,
  author =       "Kenneth A. Ross",
  title =        "Modular stratification and magic sets for {Datalog}
                 programs with negation",
  crossref =     "ACM:1990:PPN",
  pages =        "161--171",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p161-ross/p161-ross.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p161-ross/",
  abstract =     "We propose a class of programs, called modularly
                 stratified programs that have several attractive
                 properties. Modular stratification generalizes
                 stratification and local stratification, while allowing
                 programs that are not expressible by stratified
                 programs. For modularly stratified programs the
                 well-founded semantics coincides with the stable model
                 semantics, and makes every ground literal true or
                 false. Modularly stratified programs are all weakly
                 stratified, but the converse is false. Unlike some
                 weakly stratified programs, modularly stratified
                 programs can be evaluated in a subgoal-at-a-time
                 fashion. We demonstrate a technique for rewriting a
                 modularly stratified program for bottom-up evaluation
                 and extend this rewriting to include magic-set
                 techniques. The rewritten program, when evaluated
                 bottom-up, gives the same answers as the well-founded
                 semantics. We discuss extending modular stratification
                 to other operators such as set-grouping and aggregation
                 that have traditionally been stratified to prevent
                 semantic difficulties.",
  acknowledgement = ack-nhfb,
  classification = "C4210 (Formal logic); C4240 (Programming and
                 algorithm theory); C4250 (Database theory)",
  corpsource =   "Stanford Univ., CA, USA",
  generalterms = "Design; Languages; Management; Performance; Theory",
  keywords =     "aggregation; bottom-up evaluation; database theory;
                 Datalog programs; logic programming; magic sets;
                 modular stratification; modularly stratified programs;
                 negation; operators; programming theory; rewriting;
                 rewriting systems; set-grouping",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Datalog}; Software ---
                 Programming Techniques --- General (D.1.0); Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1)",
  treatment =    "T Theoretical or Mathematical",
}

%%% SIGMOD 1990 entry (crossref Garcia-Molina:1990:PAS; /mod/ URL path).
%%% The abstract is two paragraphs: the \par plus the following blank
%%% line are intentional -- keep them when editing this entry.
@InProceedings{Badrinath:1990:PES,
  author =       "B. R. Badrinath and Krithi Ramamritham",
  title =        "Performance evaluation of semantics-based multilevel
                 concurrency control protocols",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "163--172",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p163-badrinath/p163-badrinath.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p163-badrinath/",
  abstract =     "For next generation information systems, concurrency
                 control mechanisms are required to handle high level
                 abstract operations and to meet high throughput
                 demands. The currently available single level
                 concurrency control mechanisms for {\em reads\/} and
                 {\em writes\/} are inadequate for future complex
                 information systems. In this paper, we will present a
                 new {\em multilevel\/} concurrency protocol that uses a
                 semantics-based notion of conflict, which is weaker
                 than commutativity, called {\em recoverability}.
                 Further, operations are scheduled according to {\em
                 relative conflict}, a conflict notion based on the
                 structure of operations. \par

                 Performance evaluation via extensive simulation studies
                 show that with our multilevel concurrency control
                 protocol, the performance improvement is significant
                 when compared to that of a single level two-phase
                 locking based concurrency control scheme or to that of
                 a multilevel concurrency control scheme based on
                 commutativity alone. Further, simulation studies show
                 that our new multilevel concurrency control protocol
                 performs better even with resource contention.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Concurrency}; Theory of
                 Computation --- Computation by Abstract Devices ---
                 Modes of Computation (F.1.2): {\bf Parallelism and
                 concurrency}; Information Systems --- Database
                 Management --- General (H.2.0): {\bf Security,
                 integrity, and protection**}; Computer Applications ---
                 Computers in Other Systems (J.7): {\bf Military}",
}

%%% Reviewer note: corrected keyword ``justifiably'' to
%%% ``justifiability'' -- the abstract names the principle
%%% {\em justifiability\/}.
@InProceedings{You:1990:TVF,
  author =       "Jia-Huai You and Li Yan Yuan",
  title =        "Three-valued formalization of logic programming: is it
                 needed?",
  crossref =     "ACM:1990:PPN",
  pages =        "172--182",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p172-you/p172-you.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p172-you/",
  abstract =     "The central issue of this paper concerns the truth
                 value {\em undefined\/} in Przymusinski's 3-valued
                 formalization of nonmonotonic reasoning and logic
                 programming. We argue that this formalization can lead
                 to the problem of unintended semantics and loss of
                 disjunctive information. We modify the formalization by
                 proposing two general principles for logic program
                 semantics: {\em justifiability\/} and {\em minimal
                 undefinedness}. The former is shown to be a general
                 property for almost all logic program semantics, and
                 the latter requires the use of the undefined only when
                 it is necessary. We show that there are three types of
                 information embedded in the undefined: the disjunctive,
                 the factoring, and the ``difficult-to-be-assigned''. In
                 the modified formalization, the first two can be
                 successfully identified and branched into multiple
                 models. This leaves only the
                 ``difficult-to-be-assigned'' as the undefined. It is
                 shown that the truth value undefined is needed only for
                 a very special type of programs whose practicality is
                 yet to be evidenced.",
  acknowledgement = ack-nhfb,
  classification = "C1230 (Artificial intelligence); C4210 (Formal
                 logic); C4240 (Programming and algorithm theory); C4250
                 (Database theory)",
  corpsource =   "Dept. of Comput. Sci., Alberta Univ., Edmonton, Alta.,
                 Canada",
  generalterms = "Design; Languages; Management; Performance; Theory;
                 Verification",
  keywords =     "database theory; difficult-to-be-assigned;
                 disjunctive; disjunctive information; factoring; formal
                 logic; justifiability; logic program semantics; logic
                 programming; minimal undefinedness; nonmonotonic
                 reasoning; programming theory; Przymusinski 3-valued
                 formalization; ternary logic; truth value; unintended
                 semantics",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Computing Methodologies --- Artificial Intelligence
                 --- Deduction and Theorem Proving (I.2.3): {\bf Logic
                 programming}; Computing Methodologies --- Artificial
                 Intelligence --- Deduction and Theorem Proving (I.2.3):
                 {\bf Nonmonotonic reasoning and belief revision};
                 Theory of Computation --- Logics and Meanings of
                 Programs --- Semantics of Programming Languages
                 (F.3.2); Information Systems --- Database Management
                 --- Logical Design (H.2.1)",
  treatment =    "T Theoretical or Mathematical",
}

%%% SIGMOD 1990 entry (crossref Garcia-Molina:1990:PAS; /mod/ URL path).
%%% Fields are consistent with the surrounding SIGMOD entries; subject
%%% codes are ACM CCS categories (H.2.x, I.2.4).
@InProceedings{Motro:1990:QDK,
  author =       "Amihai Motro and Qiuhui Yuan",
  title =        "Querying database knowledge",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "173--183",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p173-motro/p173-motro.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p173-motro/",
  abstract =     "The role of database knowledge is usually limited to
                 the evaluation of data queries. In this paper we argue
                 that when this knowledge is of substantial volume and
                 complexity, there is genuine need to query this
                 repository of information. Moreover, since users of the
                 database may not be able to distinguish between
                 information that is data and information that is
                 knowledge, access to knowledge and data should be
                 provided with a single, coherent instrument. We provide
                 an informal review of various kinds of knowledge
                 queries, with possible syntax and semantics. We then
                 formalize a framework of knowledge-rich databases, and
                 a simple query language consisting of a pair of
                 retrieve and describe statements. The retrieve
                 statement is for querying the data (it corresponds to
                 the basic retrieval statement of various knowledge-rich
                 database systems). The describe statement is for
                 querying the knowledge. Essentially, it inquires about
                 the meaning of a concept under specified circumstances.
                 We provide algorithms for evaluating sound and finite
                 knowledge answers to describe queries, and we
                 demonstrate them with examples.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Languages (H.2.3):
                 {\bf Query languages}; Information Systems --- Database
                 Management --- Logical Design (H.2.1): {\bf Data
                 models}; Computing Methodologies --- Artificial
                 Intelligence --- Knowledge Representation Formalisms
                 and Methods (I.2.4): {\bf Representations (procedural
                 and rule-based)}",
}

%%% PODS 1990 entry (crossref ACM:1990:PPN); no abstract is recorded.
%%% The author accent is already encoded as a BibTeX special character,
%%% V{\'e}ronique, which sorts correctly under classic BibTeX.
@InProceedings{Royer:1990:BCE,
  author =       "V{\'e}ronique Royer",
  title =        "Backward chaining evaluation in stratified disjunctive
                 theories",
  crossref =     "ACM:1990:PPN",
  pages =        "183--195",
  year =         "1990",
  bibdate =      "Wed Oct 25 08:47:37 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p183-royer/p183-royer.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p183-royer/",
  acknowledgement = ack-nhfb,
  classification = "C1230 (Artificial intelligence); C4210 (Formal
                 logic); C4240 (Programming and algorithm theory); C4250
                 (Database theory); C6160 (Database management systems
                 (DBMS))",
  corpsource =   "Dept. of Comput. Sci., ONERA, Toulouse, France",
  keywords =     "atomic queries; backward chaining computation;
                 database management systems; database theory; deductive
                 databases; fixpoint; formal logic; knowledge based
                 systems; logic programming; minimal clauses;
                 programming theory; stratified disjunctive databases;
                 stratified disjunctive theories",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  treatment =    "T Theoretical or Mathematical",
}

%%% Reviewer note: added the missing grave accent to Sacc{\`a}
%%% (Domenico Sacc\`a), encoded as a BibTeX special character for
%%% correct sorting -- confirm spelling against the proceedings.
@InProceedings{Laenens:1990:ELP,
  author =       "Els Laenens and Domenico Sacc{\`a} and Dirk Vermeir",
  title =        "Extending logic programming",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "184--193",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p184-laenens/p184-laenens.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p184-laenens/",
  abstract =     "{\em An extension of logic programming, called
                 ``ordered logic programming'', which includes some
                 abstractions of the object-oriented paradigm, is
                 presented. An ordered program consists of a number of
                 modules (objects), where each module is composed by a
                 number of rules possibly with negated head predicates.
                 A sort of ``isa'' hierarchy can be defined among the
                 modules in order to allow for rule inheritance.
                 Therefore, every module sees its own rules as local
                 rules and the rules of the other modules to which it is
                 connected by the ``isa'' hierarchy as global rules. In
                 this way, as local rules may hide global rules, it is
                 possible to deal with default properties and
                 exceptions. This new approach represents a novel
                 attempt to combine the logic paradigm with the
                 object-oriented one in knowledge base systems.
                 Moreover, this approach provides a new ground for
                 explaining some recent proposals of semantics for
                 classical logic programs with negation in the rule
                 bodies and gives an interesting semantics to logic
                 programs with negated rule heads}.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Verification",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1): {\bf
                 Logic and constraint programming}; Software ---
                 Software Engineering --- Design Tools and Techniques
                 (D.2.2): {\bf Modules and interfaces}; Computing
                 Methodologies --- Artificial Intelligence --- Knowledge
                 Representation Formalisms and Methods (I.2.4): {\bf
                 Representations (procedural and rule-based)}; Software
                 --- Programming Techniques --- General (D.1.0);
                 Information Systems --- Database Management --- Systems
                 (H.2.4)",
}

%%% SIGMOD 1990 entry (crossref Garcia-Molina:1990:PAS; /mod/ URL path).
%%% Two-paragraph abstract: the \par and the blank line after it are
%%% intentional -- keep them when editing. The braced {ACTA} in the
%%% title protects the acronym from style downcasing.
@InProceedings{Chrysanthis:1990:AFS,
  author =       "Panayiotis K. Chrysanthis and Krithi Ramamritham",
  title =        "{ACTA}: a framework for specifying and reasoning about
                 transaction structure and behavior",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "194--203",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p194-chrysanthis/p194-chrysanthis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p194-chrysanthis/",
  abstract =     "Recently, a number of extensions to the traditional
                 transaction model have been proposed to support new
                 information-intensive applications such as CAD/CAM and
                 software development. However, these extended models
                 capture only a subset of interactions that can be found
                 in such applications, and represent only some of the
                 points within the spectrum of interactions possible in
                 competitive and cooperative environments. \par

                 {\em ACTA\/} is a formalizable framework developed for
                 characterizing the whole spectrum of interactions. The
                 ACTA framework is {\em not\/} yet another transaction
                 model, but is intended to unify the existing models.
                 ACTA allows for specifying the {\em structure\/} and
                 the {\em behavior\/} of transactions as well as for
                 reasoning about the concurrency and recovery properties
                 of the transactions. In ACTA, the semantics of
                 interactions are expressed in terms of transactions'
                 effects on the commit and abort of other transactions
                 and on objects' state and concurrency status (i.e.,
                 synchronization state). Its ability to capture the
                 semantics of previously proposed transaction models is
                 indicative of its generality. The reasoning
                 capabilities of this framework have also been tested by
                 using the framework to study the properties of a new
                 model that is derived by combining two existing
                 transaction models.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing}; Software
                 --- Operating Systems --- File Systems Management
                 (D.4.3); Information Systems --- Database Management
                 --- Logical Design (H.2.1): {\bf Data models};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}",
}

%%% Reviewer note: fixed a comma splice in the abstract (``the stable
%%% semantics, We identify'' --> ``. We identify'') and rejoined the
%%% line-break-split keyword ``well- founded semantics'' to
%%% ``well-founded semantics'' (the abstract uses the joined form).
@InProceedings{Schlipf:1990:EPL,
  author =       "John S. Schlipf",
  title =        "The expressive powers of the logic programming
                 semantics (extended abstract)",
  crossref =     "ACM:1990:PPN",
  pages =        "196--204",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p196-schlipf/p196-schlipf.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p196-schlipf/",
  abstract =     "We compare the expressive powers of three semantics
                 for deductive databases and logic programming: the
                 3-valued program completion semantics, the well-founded
                 semantics, and the stable semantics. We identify the
                 expressive power of the stable semantics, and in fairly
                 general circumstances that of the well-founded
                 semantics. \par

                 Over infinite Herbrand models, where the three
                 semantics have equivalent expressive power, we also
                 consider a notion of uniform translatability between
                 the 3-valued program completion and well-founded
                 semantics. In this sense of uniform translatability we
                 show the well-founded semantics to be more
                 expressive.",
  acknowledgement = ack-nhfb,
  annote =       "36 papers; See also 6836.1508 1990 9th for papers",
  classification = "C1230 (Artificial intelligence); C4210 (Formal
                 logic); C4240 (Programming and algorithm theory); C4250
                 (Database theory); C6160 (Database management systems
                 (DBMS))",
  corpsource =   "Cincinnati Univ., OH, USA",
  generalterms = "Design; Languages; Management; Performance; Theory;
                 Verification",
  keywords =     "3-valued program completion semantics; ACM; database
                 management systems; database systems; database theory;
                 deductive databases; expressive powers; infinite
                 Herbrand models; knowledge based systems; logic
                 programming; logic programming semantics; programming
                 theory; SIGACT; stable semantics; ternary logic;
                 uniform translatability; well-founded semantics",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Theory of Computation --- Logics and Meanings of
                 Programs --- Semantics of Programming Languages
                 (F.3.2); Theory of Computation --- Mathematical Logic
                 and Formal Languages --- Mathematical Logic (F.4.1):
                 {\bf Logic and constraint programming}; Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3): {\bf Logic programming};
                 Computing Methodologies --- Artificial Intelligence ---
                 Deduction and Theorem Proving (I.2.3): {\bf Deduction};
                 Information Systems --- Database Management ---
                 Database Applications (H.2.8)",
  treatment =    "T Theoretical or Mathematical",
  xxpages =      "64--86",
}

@InProceedings{Dayal:1990:OLR,
  author =       "Umeshwar Dayal and Meichun Hsu and Rivka Ladin",
  title =        "Organizing long-running activities with triggers and
                 transactions",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "204--214",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p204-dayal/p204-dayal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p204-dayal/",
  abstract =     "This paper addresses the problem of organising and
                 controlling activities that involve multiple steps of
                 processing and that typically are of long duration. We
                 explore the use of triggers and transactions to specify
                 and organize such long-running activities. Triggers
                 offer data- or event-driven specification of control
                 flow, and thus provide a flexible and modular framework
                 with which the control structures of the activities can
                 be extended or modified. We describe a model based on
                 event-condition-action rules and coupling modes. The
                 execution of these rules is governed by an extended
                 nested transaction model. Through a detailed example,
                 we illustrate the utility of the various features of
                 the model for chaining related steps without
                 sacrificing concurrency, for enforcing integrity
                 constraints, and for providing flexible failure and
                 exception handling.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management ---
                 Languages (H.2.3); Computer Applications --- Life and
                 Medical Sciences (J.3): {\bf Medical information
                 systems}; Theory of Computation --- Analysis of
                 Algorithms and Problem Complexity --- Nonnumerical
                 Algorithms and Problems (F.2.2): {\bf Computations on
                 discrete structures}",
}

@InProceedings{Sacca:1990:SMN,
  author =       "Domenico Sacca and Carlo Zaniolo",
  title =        "Stable models and nondeterminism in logic programs
                 with negation",
  crossref =     "ACM:1990:PPN",
  pages =        "205--217",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p205-sacca/p205-sacca.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p205-sacca/",
  abstract =     "Previous researchers have proposed generalizations of
                 Horn clause logic to support negation and
                 non-determinism as two separate extensions. In this
                 paper, we show that the stable model semantics for
                 logic programs provides a unified basis for the
                 treatment of both concepts. First, we introduce the
                 concepts of partial models, stable models, strongly
                 founded models and deterministic models and other
                 interesting classes of partial models and study their
                 relationships. We show that the maximal deterministic
                 model of a program is a subset of the intersection of
                 all its stable models and that the well-founded model
                 of a program is a subset of its maximal deterministic
                 model. Then, we show that the use of stable models
                 subsumes the use of the non-deterministic {\em
                 choice\/} construct in LDL and provides an alternative
                 definition of the semantics of this construct. Finally,
                 we provide a constructive definition for stable models
                 with the introduction of a procedure, called {\em
                 backtracking fixpoint,\/} that non-deterministically
                 constructs a total stable model, if such a model
                 exists.",
  acknowledgement = ack-nhfb,
  classification = "C1230 (Artificial intelligence); C4210 (Formal
                 logic); C4240 (Programming and algorithm theory); C4250
                 (Database theory); C6160 (Database management systems
                 (DBMS))",
  corpsource =   "Dipartimento di Sistemi, Calabria Univ., Rende,
                 Italy",
  generalterms = "Design; Languages; Management; Performance;
                 Reliability; Theory; Verification",
  keywords =     "backtracking fixpoint; database management systems;
                 database theory; deterministic models; formal logic;
                 knowledge based systems; logic programming; logic
                 programs; negation; nondeterminism; partial models;
                 programming theory; stable model semantics; strongly
                 founded models",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Theory of Computation --- Mathematical Logic and
                 Formal Languages --- Mathematical Logic (F.4.1): {\bf
                 Logic and constraint programming}; Computing
                 Methodologies --- Artificial Intelligence --- Deduction
                 and Theorem Proving (I.2.3): {\bf Logic programming};
                 Theory of Computation --- Computation by Abstract
                 Devices --- Modes of Computation (F.1.2): {\bf
                 Alternation and nondeterminism}; Theory of Computation
                 --- Logics and Meanings of Programs --- Semantics of
                 Programming Languages (F.3.2)",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Breitbart:1990:RTM,
  author =       "Yuri Breitbart and Avi Silberschatz and Glenn R.
                 Thompson",
  title =        "Reliable transaction management in a multidatabase
                 system",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "215--224",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p215-breitbart/p215-breitbart.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p215-breitbart/",
  abstract =     "A model of a multidatabase system is defined in which
                 each local DBMS uses the two-phase locking protocol.
                 Locks are released by a global transaction only after
                 the transaction commits or aborts at each local site.
                 Failures may occur during the processing of
                 transactions. We design a fault tolerant transaction
                 management algorithm and recovery procedures that
                 retain global database consistency. We also show that
                 our algorithms ensure freedom from global deadlocks of
                 any kind.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Software ---
                 Operating Systems --- Process Management (D.4.1): {\bf
                 Scheduling}",
}

@InProceedings{Abiteboul:1990:NDL,
  author =       "Serge Abiteboul and Eric Simon and Victor Vianu",
  title =        "Non-deterministic languages to express deterministic
                 transformations",
  crossref =     "ACM:1990:PPN",
  pages =        "218--229",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p218-abiteboul/p218-abiteboul.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p218-abiteboul/",
  abstract =     "The use of non-deterministic database languages is
                 motivated using pragmatic and theoretical
                 considerations. It is shown that non-determinism
                 resolves some difficulties concerning the expressive
                 power of deterministic languages: there are
                 non-deterministic languages expressing low complexity
                 classes of queries/updates, whereas no such
                 deterministic languages exist. Various mechanisms
                 yielding non-determinism are reviewed. The focus is on
                 two closely related families of non-deterministic
                 languages. The first consists of extensions of {\em
                 Datalog\/} with negations in bodies and/or heads of
                 rules, with non-deterministic fixpoint semantics. The
                 second consists of non-deterministic extensions of
                 first-order logic and fixpoint logics, using the {\em
                 witness\/} operator. The ability of the various
                 non-deterministic languages to express {\em
                 deterministic\/} transformation is characterized. In
                 particular, non-deterministic languages expressing
                 exactly the queries/updates computable in polynomial
                 time are exhibited, whereas it is conjectured that no
                 analogous deterministic language exists. The connection
                 between non-deterministic languages and determinism is
                 also explored. Several problems of practical interest
                 are examined, such as checking (statically or
                 dynamically) if a given program is deterministic,
                 detecting coincidence of deterministic and
                 non-deterministic semantics, and verifying termination
                 for non-deterministic programs.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Management; Performance; Theory;
                 Verification",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3); Theory of Computation ---
                 Computation by Abstract Devices --- Modes of
                 Computation (F.1.2): {\bf Alternation and
                 nondeterminism}",
}

@InProceedings{Abiteboul:1990:NLE,
  author =       "S. Abiteboul and E. Simon and V. Vianu",
  title =        "Nondeterministic languages to express deterministic
                 transformations",
  crossref =     "ACM:1990:PPN",
  pages =        "218--229",
  year =         "1990",
  bibdate =      "Mon Mar 16 10:08:58 MST 1998",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/pods.bib",
  acknowledgement = ack-nhfb,
  classification = "C4210 (Formal logic); C4240 (Programming and
                 algorithm theory); C4250 (Database theory); C6140D
                 (High level languages); C6160 (Database management
                 systems (DBMS))",
  corpsource =   "INRIA, Le Chesnay, France",
  keywords =     "database management systems; database theory; Datalog;
                 deterministic languages; deterministic transformations;
                 expressive power; first-order logic; fixpoint logics;
                 formal logic; logic programming; negations;
                 nondeterministic database languages; nondeterministic
                 fixpoint semantics; polynomial time; programming
                 theory; query languages; witness operator",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Cacace:1990:IOO,
  author =       "F. Cacace and S. Ceri and S. Crespi-Reghizzi and L.
                 Tanca and R. Zicari",
  title =        "Integrating object-oriented data modelling with a
                 rule-based programming paradigm",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "225--236",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p225-cacace/p225-cacace.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p225-cacace/",
  abstract =     "LOGRES is a new project for the development of
                 extended database systems which is based on the
                 integration of the object-oriented data modelling
                 paradigm and of the rule-based approach for the
                 specification of queries and updates. \par

                 The data model supports generalization hierarchies and
                 object sharing, the rule-based language extends {\em
                 Datalog\/} to support generalized type constructors
                 (sets, multisets, and sequences), rule-based integrity
                 constraints are automatically produced by analyzing
                 schema definitions. Modularization is a fundamental
                 feature, as modules encapsulate queries and updates,
                 when modules are applied to a LOGRES database, their
                 side effects can be controlled. \par

                 The LOGRES project is a follow-up of the ALGRES
                 project, and takes advantage of the ALGRES programming
                 environment for the development of a fast prototype.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf
                 Datalog}",
}

@InProceedings{Yannakakis:1990:GTM,
  author =       "Mihalis Yannakakis",
  title =        "Graph-theoretic methods in database theory",
  crossref =     "ACM:1990:PPN",
  pages =        "230--242",
  year =         "1990",
  bibdate =      "Wed Oct 25 08:47:37 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p230-yannakakis/p230-yannakakis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p230-yannakakis/",
  acknowledgement = ack-nhfb,
  classification = "C1160 (Combinatorial mathematics); C4250 (Database
                 theory)",
  corpsource =   "AT and T Bell Labs., Murray Hill, NJ, USA",
  keywords =     "database theory; dynamic problem; graph theory; main
                 memory model; online queries; online updates; parallel
                 algorithms; path problems; query processing; recursive
                 queries; searching graphs; semiring computations;
                 transitive closure",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  treatment =    "B Bibliography; T Theoretical or Mathematical",
}

@InProceedings{Kiernan:1990:MDD,
  author =       "G. Kiernan and C. de Maindreville and E. Simon",
  title =        "Making deductive databases a practical technology: a
                 step forward",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "237--246",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p237-kiernan/p237-kiernan.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p237-kiernan/",
  abstract =     "Deductive databases provide a formal framework to
                 study rule-based query languages that are extensions of
                 first-order logic. However, deductive database
                 languages and their current implementations do not seem
                 appropriate for improving the development of real
                 applications or even sample of them. Our goal is to
                 make deductive database technology practical. The
                 design and implementation of the RDL1 system, presented
                 in this paper, constitute a step toward this goal. Our
                 approach is based on the integration of a production
                 rule language within a relational database system, the
                 development of a rule-based programming environment and
                 the support of system extensibility using Abstract Data
                 Types. We discuss important practical experience gained
                 during the implementation of the system. Also,
                 comparisons with related work such as LDL, STARBURST
                 and POSTGRES are given.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Languages; Performance; Theory",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Datalog}; Theory of Computation
                 --- Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1); Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}; Software --- Programming Languages ---
                 Language Constructs and Features (D.3.3): {\bf Abstract
                 data types}; Information Systems --- Database
                 Management --- Systems (H.2.4); Software ---
                 Programming Languages --- Language Classifications
                 (D.3.2): {\bf LISP}",
}

@InProceedings{Willard:1990:QAP,
  author =       "Dan E. Willard",
  title =        "Quasilinear algorithms for processing relational
                 calculus expressions (preliminary report)",
  crossref =     "ACM:1990:PPN",
  pages =        "243--257",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p243-willard/p243-willard.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p243-willard/",
  abstract =     "Throughout this paper q will denote a query such that
                 I is the number of tuples inputted into the query, and
                 U is the number of tuples in its output. We will say
                 that q has quasi-linear complexity iff for some
                 constant $d$, it is executable in time {$O(U + I \log^d I)$}
                 and space {$O(I + U)$}. This article will define a large
                 subset of the relational calculus, called RCS, and show
                 that all RCS queries are executable by quasi-linear
                 algorithms. \par

                 Our algorithm does not require the maintenance of any
                 complex index, as it builds all the needed data
                 structures during the course of the executing
                 algorithm. Its exponent d can be large for some
                 particular queries q, but it is a quite nice constant
                 equal to 1 or 0 in most practical cases. Our algorithm
                 is intended for data bases stored in main memory, and
                 its time {$O(U + I \log^d I)$} should amount to only a few
                 seconds of CPU time in many practical applications.
                 \par

                 Chapter 10 of this paper lists some open questions for
                 further investigation.",
  acknowledgement = ack-nhfb,
  classification = "C4250 (Database theory); C6160D (Relational DBMS)",
  corpsource =   "Dept. of Comput. Sci., State Univ. of New York,
                 Albany, NY, USA",
  generalterms = "Algorithms; Design; Management; Performance; Theory;
                 Verification",
  keywords =     "computational complexity; data structures; database
                 theory; main memory; quasi-linear complexity; query
                 languages; RCS; RCS queries; relational calculus
                 expressions; relational databases",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Computing Methodologies --- Symbolic and Algebraic
                 Manipulation --- Algorithms (I.1.2); Computing
                 Methodologies --- Symbolic and Algebraic Manipulation
                 --- Expressions and Their Representation (I.1.1);
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Relational databases}",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Mumick:1990:MR,
  author =       "I. S. Mumick and S. J. Finkelstein and Hamid Pirahesh
                 and Raghu Ramakrishnan",
  title =        "Magic is relevant",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "247--258",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p247-mumick/p247-mumick.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p247-mumick/",
  abstract =     "We define the magic-sets transformation for
                 traditional relational systems (with duplicates,
                 aggregation and grouping), as well as for relational
                 systems extended with recursion. We compare the
                 magic-sets rewriting to traditional optimization
                 techniques for nonrecursive queries, and use
                 performance experiments to argue that the magic-sets
                 transformation is often a better optimization
                 technique.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Experimentation; Languages;
                 Performance",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Query processing}; Information Systems ---
                 Database Management --- Languages (H.2.3): {\bf SQL}",
}

@InProceedings{Abdel-Ghaffar:1990:ODA,
  author =       "Khaled A. S. Abdel-Ghaffar and Amr {El Abbadi}",
  title =        "On the optimality of disk allocation for {Cartesian}
                 product files (extended abstract)",
  crossref =     "ACM:1990:PPN",
  pages =        "258--264",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p258-abdel-ghaffar/p258-abdel-ghaffar.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p258-abdel-ghaffar/",
  abstract =     "In this paper we present a coding-theoretic analysis
                 of the disk allocation problem. We provide both
                 necessary and sufficient conditions for the existence
                 of strictly optimal allocation methods. Based on a
                 class of optimal codes, known as maximum distance
                 separable codes, strictly optimal allocation methods
                 are constructed. Using the necessary conditions proved,
                 we argue that the standard definition of strict
                 optimality is too strong, and cannot be attained in
                 general. A new criterion for optimality is therefore
                 defined whose objective is to design allocation methods
                 that yield a response time of one for all queries with
                 a minimum number of specified attributes. Using coding
                 theory, we determined this minimum number for binary
                 files, assuming that the number of disks is a power of
                 two. In general, our approach provides better
                 allocation methods than previous techniques.",
  acknowledgement = ack-nhfb,
  classification = "C1260 (Information theory); C4250 (Database theory);
                 C6120 (File organisation)",
  corpsource =   "Dept of Electr. Eng. and Comput. Sci., California
                 Univ., Davis, CA, USA",
  generalterms = "Design; Management; Measurement; Performance; Theory;
                 Verification",
  keywords =     "binary files; Cartesian product files;
                 coding-theoretic analysis; database theory; disk
                 allocation; information theory; maximum distance
                 separable codes; necessary conditions; optimal
                 allocation methods; response time; storage allocation",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Data --- Coding and Information Theory (E.4); Software
                 --- Operating Systems --- Storage Management (D.4.2):
                 {\bf Allocation/deallocation strategies}; Information
                 Systems --- Information Storage and Retrieval ---
                 Information Storage (H.3.2): {\bf File organization}",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Widom:1990:SOP,
  author =       "Jennifer Widom and S. J. Finkelstein",
  title =        "Set-oriented production rules in relational database
                 systems",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "259--270",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p259-widom/p259-widom.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p259-widom/",
  abstract =     "We propose incorporating a production rules facility
                 into a relational database system. Such a facility
                 allows definition of database operations that are
                 automatically executed whenever certain conditions are
                 met. In keeping with the set-oriented approach of
                 relational data manipulation languages, our production
                 rules are also set-oriented--they are triggered by sets
                 of changes to the database and may perform sets of
                 changes. The condition and action parts of our
                 production rules may refer to the current state of the
                 database as well as to the sets of changes triggering
                 the rules. We define a syntax for production rule
                 definition as an extension to SQL. A model of system
                 behavior is used to give an exact semantics for
                 production rule execution, taking into account
                 externally-generated operations, self-triggering rules,
                 and simultaneous triggering of multiple rules.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages",
  subject =      "Information Systems --- Database Management ---
                 Logical Design (H.2.1): {\bf Data models}; Computing
                 Methodologies --- Artificial Intelligence --- Knowledge
                 Representation Formalisms and Methods (I.2.4): {\bf
                 Representations (procedural and rule-based)};
                 Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf SQL}; Information Systems ---
                 Database Management --- Systems (H.2.4)",
}

@InProceedings{Aref:1990:EPW,
  author =       "Walid G. Aref and Hanan Samet",
  title =        "Efficient processing of window queries in the pyramid
                 data structure",
  crossref =     "ACM:1990:PPN",
  pages =        "265--272",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p265-aref/p265-aref.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p265-aref/",
  abstract =     "Window operations serve as the basis of a number of
                 queries that can be posed in a spatial database.
                 Examples of these window-based queries include the
                 exist query (i.e., determining whether or not a spatial
                 feature exists inside a window) and the report query,
                 (i.e., reporting the identity of all the features that
                 exist inside a window). Algorithms are described for
                 answering window queries in {$O(n \log \log T)$} time
                 for a window of size {$n \times n$} in a feature space
                 (e.g., an image) of size {$T \times T$} (e.g., pixel
                 elements). The significance of this result is that even
                 though the window contains {$n^2$} pixel elements, the
                 worst-case time complexity of the algorithms is almost
                 linearly proportional (and not quadratic) to the window
                 diameter, and does not depend on other factors. The
                 above complexity bounds are achieved via the
                 introduction of the incomplete pyramid data structure
                 (a variant of the pyramid data structure) as the
                 underlying representation to store spatial features and
                 to answer queries on them.",
  acknowledgement = ack-nhfb,
  classification = "C4240 (Programming and algorithm theory); C4250
                 (Database theory); C6160Z (Other DBMS)",
  corpsource =   "Inst. for Adv. Comput. Studies, Maryland Univ.,
                 College Park, MD, USA",
  generalterms = "Algorithms; Design; Management; Measurement;
                 Performance; Theory",
  keywords =     "computational complexity; data structures; database
                 management systems; database theory; exist query; pixel
                 elements; pyramid data structure; report query; spatial
                 database; window queries; worst-case time complexity",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Data --- Data
                 Structures (E.1)",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Hanson:1990:PMA,
  author =       "Eric N. Hanson and Moez Chaabouni and Chang-Ho Kim
                 and Yu-Wang Wang",
  title =        "A predicate matching algorithm for database rule systems",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "271--280",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p271-hanson/p271-hanson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p271-hanson/",
  abstract =     "Forward-chaining rule systems must test each newly asserted
                 fact against a collection of predicates to find those rules
                 that match the fact. Expert system rule engines use a simple
                 combination of hashing and sequential search for this
                 matching. We introduce an algorithm for finding the matching
                 predicates that is more efficient than the standard
                 algorithm when the number of predicates is large. We focus
                 on equality and inequality predicates on totally ordered
                 domains. This algorithm is well-suited for database rule
                 systems, where predicate-testing speed is critical. A key
                 component of the algorithm is the {\em interval binary
                 search tree\/} (IBS-tree). The IBS-tree is designed to allow
                 efficient retrieval of all intervals (e.g., range
                 predicates) that overlap a point, while allowing dynamic
                 insertion and deletion of intervals. The algorithm could
                 also be used to improve the performance of forward-chaining
                 inference engines for large expert systems applications.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance",
  subject =      "Computing Methodologies --- Artificial Intelligence ---
                 Knowledge Representation Formalisms and Methods (I.2.4):
                 {\bf Representations (procedural and rule-based)};
                 Computing Methodologies --- Artificial Intelligence ---
                 Problem Solving, Control Methods, and Search (I.2.8):
                 {\bf Heuristic methods}; Theory of Computation --- Analysis
                 of Algorithms and Problem Complexity --- Nonnumerical
                 Algorithms and Problems (F.2.2): {\bf Sorting and
                 searching}; Computing Methodologies --- Artificial
                 Intelligence --- Problem Solving, Control Methods, and
                 Search (I.2.8): {\bf Graph and tree search strategies};
                 Information Systems --- Database Management --- Systems
                 (H.2.4)",
}

@InProceedings{Johnson:1990:FPA,
  author =       "Theodore Johnson and Dennis Shasha",
  title =        "A framework for the performance analysis of
                 concurrent {B}-tree algorithms",
  crossref =     "ACM:1990:PPN",
  pages =        "273--287",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p273-johnson/p273-johnson.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p273-johnson/",
  abstract =     "Many concurrent B-tree algorithms have been proposed, but
                 they have not yet been satisfactorily analyzed. When
                 transaction processing systems require high levels of
                 concurrency, a restrictive serialization technique on the
                 B-tree index can cause a bottleneck. In this paper, we
                 present a framework for constructing analytical performance
                 models of concurrent B-tree algorithms. The models can
                 predict the response time and maximum throughput. We
                 analyze three algorithms: Naive Lock-coupling, Optimistic
                 Descent, and the Lehman-Yao algorithm. The analyses are
                 validated by simulations of the algorithms on actual
                 B-trees. Simple and instructive rules of thumb for
                 predicting performance are also derived. We apply the
                 analyses to determine the effect of database recovery on
                 B-tree concurrency.",
  acknowledgement = ack-nhfb,
  classification = "C4250 (Database theory); C6160B (Distributed
                 DBMS)",
  corpsource =   "Courant Inst. of Math. Sci., New York Univ., NY,
                 USA",
  generalterms = "Algorithms; Design; Experimentation; Management;
                 Measurement; Performance; Theory",
  keywords =     "B-tree index; concurrency control; concurrent B-tree
                 algorithms; data structures; database recovery; database
                 theory; distributed databases; Lehman-Yao algorithm;
                 maximum throughput; naive lock-coupling; optimistic
                 descent; performance analysis; response time; transaction
                 processing systems; trees (mathematics)",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Concurrency}; Mathematics of Computing ---
                 Discrete Mathematics --- General (G.2.0); Information
                 Systems --- Database Management --- Systems (H.2.4):
                 {\bf Transaction processing}; Mathematics of Computing ---
                 Probability and Statistics (G.3): {\bf Queueing theory}",
  treatment =    "T Theoretical or Mathematical",
}

@InProceedings{Stonebraker:1990:RPC,
  author =       "Michael Stonebraker and Anant Jhingran and Jeffrey Goh
                 and Spyros Potamianos",
  title =        "On rules, procedures, caching and views in data base
                 systems",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "281--290",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p281-stonebraker/p281-stonebraker.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p281-stonebraker/",
  abstract =     "This paper demonstrates that a simple rule system can
                 be constructed that supports a more powerful view
                 system than available in current commercial systems.
                 Not only can views be specified by using rules but also
                 special semantics for resolving ambiguous view updates
                 are simply additional rules. Moreover, procedural data
                 types as proposed in POSTGRES are also efficiently
                 simulated by the same rules system. Lastly, caching of
                 the action part of certain rules is a possible
                 performance enhancement and can be applied to
                 materialize views as well as to cache procedural data
                 items. Hence, we conclude that a rule system is a
                 fundamental concept in a next generation DBMS, and it
                 subsumes both views and procedures as special cases.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}; Information Systems ---
                 Database Management --- Languages (H.2.3)",
}

@InProceedings{Lassez:1990:QC,
  author =       "Jean-Louis Lassez",
  title =        "Querying constraints",
  crossref =     "ACM:1990:PPN",
  pages =        "288--298",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p288-lassez/p288-lassez.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p288-lassez/",
  abstract =     "The design of languages to tackle constraint satisfaction
                 problems has a long history. Only more recently the reverse
                 problem of introducing constraints as primitive constructs
                 in programming languages has been addressed. A main task
                 that the designers and implementors of such languages face
                 is to use and adapt the concepts and algorithms from the
                 extensive studies on constraints done in areas such as
                 Mathematical Programming, Symbolic Computation, Artificial
                 Intelligence, Program Verification and Computational
                 Geometry. In this paper, we illustrate this task in a
                 simple and yet important domain: linear arithmetic
                 constraints. We show how one can design a querying system
                 for sets of linear constraints by using basic concepts from
                 logic programming and symbolic computation, as well as
                 algorithms from linear programming and computational
                 geometry. We conclude by reporting briefly on how notions
                 of negation and canonical representation used in linear
                 constraints can be generalized to account for cases in term
                 algebras, symbolic computation, affine geometry, and
                 elsewhere.",
  acknowledgement = ack-nhfb,
  classification = "C6140D (High level languages); C6160 (Database
                 management systems (DBMS))",
  corpsource =   "IBM Thomas J. Watson Res. Center, Yorktown Heights, NY,
                 USA",
  generalterms = "Algorithms; Design; Languages; Management; Performance;
                 Theory",
  keywords =     "affine geometry; canonical representation; computational
                 geometry; constraint satisfaction problems; database
                 management systems; linear arithmetic constraints; linear
                 programming; logic programming; negation; primitive
                 constructs; programming languages; query languages;
                 querying system; reverse problem; symbolic computation;
                 term algebras",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management --- Languages
                 (H.2.3); Mathematics of Computing --- Numerical Analysis
                 --- Optimization (G.1.6): {\bf Linear programming};
                 Computing Methodologies --- Artificial Intelligence ---
                 Deduction and Theorem Proving (I.2.3): {\bf Logic
                 programming}; Theory of Computation --- Mathematical Logic
                 and Formal Languages --- Mathematical Logic (F.4.1):
                 {\bf Logic and constraint programming}",
  treatment =    "P Practical",
}

@InProceedings{Rosenthal:1990:QGI,
  author =       "Arnon Rosenthal and Cesar Galindo-Legaria",
  title =        "Query graphs, implementing trees, and
                 freely-reorderable outerjoins",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "291--299",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p291-rosenthal/p291-rosenthal.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p291-rosenthal/",
  abstract =     "We determine when a join/outerjoin query can be
                 expressed unambiguously as a query graph, without an
                 explicit specification of the order of evaluation. To
                 do so, we first characterize the set of expression
                 trees that implement a given join/outerjoin query
                 graph, and investigate the existence of transformations
                 among the various trees. Our main theorem is that a
                 join/outerjoin query is freely reorderable if the query
                 graph derived from it falls within a particular class:
                 every tree that ``implements'' such a graph evaluates
                 to the same result. \par

                 The result has applications to language design and
                 query optimization. Languages that generate queries
                 within such a class do not require the user to indicate
                 priority among join operations, and hence may present a
                 simplified syntax. And it is unnecessary to add
                 extensive analyses to a conventional query optimizer in
                 order to generate legal reorderings for a
                 freely-reorderable language.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Performance; Theory",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Mathematics of
                 Computing --- Discrete Mathematics --- Graph Theory
                 (G.2.2): {\bf Trees}; Theory of Computation ---
                 Analysis of Algorithms and Problem Complexity ---
                 Nonnumerical Algorithms and Problems (F.2.2): {\bf
                 Computations on discrete structures}",
}

@InProceedings{Kanellakis:1990:CQL,
  author =       "Paris C. Kanellakis and Gabriel M. Kuper and Peter
                 Z. Revesz",
  title =        "Constraint query languages (preliminary report)",
  crossref =     "ACM:1990:PPN",
  pages =        "299--313",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p299-kanellakis/p299-kanellakis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p299-kanellakis/",
  abstract =     "We discuss the relationship between constraint programming
                 and database query languages. We show that bottom-up,
                 efficient, declarative database programming can be combined
                 with efficient constraint solving. The key intuition is
                 that the generalization of a ground fact, or tuple, is a
                 conjunction of constraints. We describe the basic
                 Constraint Query Language design principles, and illustrate
                 them with four different classes of constraints:
                 Polynomial, rational order, equality, and Boolean
                 constraints.",
  acknowledgement = ack-nhfb,
  annote =       "36 papers; See also 6836.1508 1990 9th for papers",
  classification = "C6140D (High level languages); C6160 (Database
                 management systems (DBMS))",
  corpsource =   "Brown Univ., Providence, RI, USA",
  generalterms = "Design; Languages; Management; Performance; Theory",
  keywords =     "ACM; constraint programming; constraint solving; database
                 management systems; database query languages; database
                 systems; declarative database programming; logic
                 programming; query languages; SIGACT",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Mathematics of Computing --- Numerical Analysis ---
                 Optimization (G.1.6): {\bf Constrained optimization};
                 Information Systems --- Database Management --- Languages
                 (H.2.3); Computing Methodologies --- Computer Graphics ---
                 Computational Geometry and Object Modeling (I.3.5)",
  treatment =    "P Practical",
  xxpages =      "26--52",
}

@InProceedings{Shekita:1990:PEP,
  author =       "Eugene J. Shekita and Michael J. Carey",
  title =        "A performance evaluation of pointer-based joins",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "300--311",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p300-shekita/p300-shekita.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p300-shekita/",
  abstract =     "In this paper we describe three pointer-based join
                 algorithms that are simple variants of the nested-loops,
                 sort-merge, and hybrid-hash join algorithms used in
                 relational database systems. Each join algorithm is
                 described and an analysis is carried out to compare the
                 performance of the pointer-based algorithms to their
                 standard, non-pointer-based counterparts. The results of
                 the analysis show that the pointer-based algorithms can
                 provide significant performance gains in many situations.
                 The results also show that the pointer-based nested-loops
                 join algorithm, which is perhaps the most natural
                 pointer-based join algorithm to consider using in an
                 object-oriented database system, performs quite poorly on
                 most medium to large joins.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance",
  subject =      "Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}; Theory of Computation --- Analysis of
                 Algorithms and Problem Complexity --- Nonnumerical
                 Algorithms and Problems (F.2.2): {\bf Sorting and
                 searching}",
}

@InProceedings{Ioannidis:1990:RAO,
  author =       "Y. E. Ioannidis and Younkyung Kang",
  title =        "Randomized algorithms for optimizing large join queries",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "312--321",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p312-ioannidis/p312-ioannidis.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p312-ioannidis/",
  abstract =     "Query optimization for relational database systems is a
                 combinatorial optimization problem, which makes exhaustive
                 search unacceptable as the query size grows. Randomized
                 algorithms, such as Simulated Annealing (SA) and Iterative
                 Improvement (II), are viable alternatives to exhaustive
                 search. We have adapted these algorithms to the
                 optimization of project-select-join queries. We have tested
                 them on large queries of various types with different
                 databases, concluding that in most cases SA identifies a
                 lower cost access plan than II. To explain this result, we
                 have studied the shape of the cost function over the
                 solution space associated with such queries and we have
                 conjectured that it resembles a `cup' with relatively small
                 variations at the bottom. This has inspired a new Two Phase
                 Optimization algorithm, which is a combination of Simulated
                 Annealing and Iterative Improvement. Experimental results
                 show that Two Phase Optimization outperforms the original
                 algorithms in terms of both output quality and running
                 time.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Experimentation; Performance",
  subject =      "Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Query processing}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf Data
                 models}; Theory of Computation --- Analysis of Algorithms
                 and Problem Complexity --- Miscellaneous (F.2.m);
                 Mathematics of Computing --- Numerical Analysis ---
                 Optimization (G.1.6)",
}

@InProceedings{Mumick:1990:MC,
  author =       "Inderpal Singh Mumick and Sheldon J. Finkelstein and
                 Hamid Pirahesh and Raghu Ramakrishnan",
  title =        "Magic conditions",
  crossref =     "ACM:1990:PPN",
  pages =        "314--330",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p314-mumick/p314-mumick.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p314-mumick/",
  abstract =     "Much recent work has focussed on the bottom-up
                 evaluation of Datalog programs. One approach, called
                 Magic-Sets, is based on rewriting a logic program so
                 that bottom-up fixpoint evaluation of the program
                 avoids generation of irrelevant facts ([BMSU86, BR87,
                 Ram88]). It is widely believed that the principal
                 application of the Magic-Sets technique is to restrict
                 computation in recursive queries using equijoin
                 predicates. We extend the Magic-Set transformation to
                 use predicates other than equality ({$X < 10$}, for
                 example). This Extended Magic-Set technique has
                 practical utility in ``real'' relational databases, not
                 only for recursive queries, but for non-recursive
                 queries as well; in ([MFPR90]) we use the results in
                 this paper and those in [MPR89] to define a magic-set
                 transformation for relational databases supporting SQL
                 and its extensions, going on to describe an
                 implementation of magic in Starburst ([HFLP89]). We
                 also give preliminary performance measurements. \par

                 In extending Magic-Sets, we describe a natural
                 generalization of the common class of bound ($b$) and
                 free ($f$) adornments. We also present a formalism to
                 compare adornment classes.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Design; Languages; Management;
                 Performance; Reliability; Theory; Verification",
  subject =      "Information Systems --- Database Management ---
                 Languages (H.2.3): {\bf Datalog}; Theory of Computation
                 --- Mathematical Logic and Formal Languages ---
                 Mathematical Logic (F.4.1): {\bf Logic and constraint
                 programming}; Computing Methodologies --- Artificial
                 Intelligence --- Deduction and Theorem Proving (I.2.3):
                 {\bf Logic programming}; Theory of Computation ---
                 Mathematical Logic and Formal Languages --- Grammars
                 and Other Rewriting Systems (F.4.2): {\bf Parallel
                 rewriting systems}",
}

@InProceedings{Mumick:1990:MCR,
  author =       "I. S. Mumick and S. J. Finkelstein and H. Pirahesh and
                 R. Ramakrishnan",
  title =        "Magic conditions (relational queries)",
  crossref =     "ACM:1990:PPN",
  pages =        "314--330",
  year =         "1990",
  bibdate =      "Mon Mar 16 10:08:58 MST 1998",
  bibsource =    "https://www.math.utah.edu/pub/tex/bib/pods.bib",
  acknowledgement = ack-nhfb,
  classification = "C4250 (Database theory); C6140D (High level
                 languages); C6160D (Relational DBMS)",
  corpsource =   "Stanford Univ., CA, USA",
  keywords =     "adornment classes; bottom-up evaluation; database
                 theory; Datalog programs; equijoin predicates; logic
                 program; Magic-Sets; query languages; recursive
                 queries; relational databases",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  treatment =    "P Practical",
}

@InProceedings{Beckmann:1990:RTE,
  author =       "Norbert Beckmann and Hans-Peter Kriegel and Ralf
                 Schneider and Bernhard Seeger",
  title =        "The {R$^*$-tree}: an efficient and robust access
                 method for points and rectangles",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "322--331",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p322-beckmann/p322-beckmann.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p322-beckmann/",
  abstract =     "The R-tree, one of the most popular access methods for
                 rectangles, is based on the heuristic optimization of
                 the area of the enclosing rectangle in each inner node.
                 By running numerous experiments in a standardized
                 testbed under highly varying data, queries and
                 operations, we were able to design the {R$^*$-tree}
                 which incorporates a combined optimization of area,
                 margin and overlap of each enclosing rectangle in the
                 directory. Using our standardized testbed in an
                 exhaustive performance comparison, it turned out that
                 the {R$^*$-tree} clearly outperforms the existing
                 R-tree variants: Guttman's linear and quadratic R-tree
                 and Greene's variant of the R-tree. This superiority of
                 the {R$^*$-tree} holds for different types of queries
                 and operations, such as map overlay, for both
                 rectangles and multidimensional points in all
                 experiments. From a practical point of view the
                 {R$^*$-tree} is very attractive because of the
                 following two reasons: (1) it efficiently supports
                 point and spatial data at the same time and (2) its
                 implementation cost is only slightly higher than that
                 of other R-trees.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Performance",
  subject =      "Data --- Data Structures (E.1): {\bf Trees}; Theory of
                 Computation --- Analysis of Algorithms and Problem
                 Complexity --- Nonnumerical Algorithms and Problems
                 (F.2.2): {\bf Sorting and searching}; Information
                 Systems --- Database Management --- Logical Design
                 (H.2.1): {\bf Data models}",
}

@InProceedings{Haritsa:1990:BOA,
  author =       "Jayant R. Haritsa and Michael J. Carey and Miron Livny",
  title =        "On being optimistic about real-time constraints",
  crossref =     "ACM:1990:PPN",
  pages =        "331--343",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p331-haritsa/p331-haritsa.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p331-haritsa/",
  abstract =     "Performance studies of concurrency control algorithms for
                 conventional database systems have shown that, under most
                 operating circumstances, locking protocols outperform
                 optimistic techniques. Real-time database systems have
                 special characteristics - timing constraints are associated
                 with transactions, performance criteria are based on
                 satisfaction of these timing constraints, and scheduling
                 algorithms are priority driven. In light of these special
                 characteristics, results regarding the performance of
                 concurrency control algorithms need to be re-evaluated. We
                 show in this paper that the following parameters of the
                 real-time database system - its policy for dealing with
                 transactions whose constraints are not met, its knowledge
                 of transaction resource requirements, and the availability
                 of resources - have a significant impact on the relative
                 performance of the concurrency control algorithms. In
                 particular, we demonstrate that under a policy that
                 discards transactions whose constraints are not met,
                 optimistic concurrency control outperforms locking over a
                 wide range of system utilization. We also outline why, for
                 a variety of reasons, optimistic algorithms appear
                 well-suited to real-time database systems.",
  acknowledgement = ack-nhfb,
  classification = "C6160B (Distributed DBMS)",
  corpsource =   "Dept. of Comput. Sci., Wisconsin Univ., Madison, WI,
                 USA",
  generalterms = "Algorithms; Design; Experimentation; Management;
                 Measurement; Performance; Theory",
  keywords =     "concurrency control; conventional database systems;
                 distributed databases; locking; locking protocols;
                 optimistic techniques; performance criteria; real-time
                 constraints; real-time database system; real-time
                 systems; scheduling algorithms; timing constraints;
                 transaction processing",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Computer Systems Organization --- Special-Purpose and
                 Application-Based Systems (C.3): {\bf Real-time and
                 embedded systems}; Information Systems --- Database
                 Management --- Systems (H.2.4): {\bf Transaction
                 processing}; Information Systems --- Database
                 Management --- Physical Design (H.2.2)",
  treatment =    "P Practical",
}

%%% SIGMOD '90 paper (crossref = Garcia-Molina:1990:PAS, the SIGMOD 1990
%%% proceedings): evaluates mappings of a multi-dimensional key space onto
%%% one dimension and proposes one based on Hilbert's space-filling curve
%%% (see abstract).  Entry data unchanged; checked against house style.
@InProceedings{Jagadish:1990:LCO,
  author =       "H. V. Jagadish",
  title =        "Linear clustering of objects with multiple
                 attributes",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "332--342",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p332-jagadish/p332-jagadish.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p332-jagadish/",
  abstract =     "There is often a need to map a multi-dimensional space
                 on to a one-dimensional space. For example, this kind
                 of mapping has been proposed to permit the use of
                 one-dimensional indexing techniques to a
                 multi-dimensional index space such as in a spatial
                 database. This kind of mapping is also of value in
                 assigning physical storage, such as assigning buckets
                 to records that have been indexed on multiple
                 attributes, to minimize the disk access effort. \par

                 In this paper, we discuss what the desired properties
                 of such a mapping are, and evaluate, through analysis
                 and simulation, several mappings that have been
                 proposed in the past. We present a mapping based on
                 Hilbert's space-filling curve, which out-performs
                 previously proposed mappings on average over a variety
                 of different operating conditions.",
  acknowledgement = ack-nhfb,
  generalterms = "Algorithms; Experimentation; Performance",
  subject =      "Theory of Computation --- Analysis of Algorithms and
                 Problem Complexity --- Nonnumerical Algorithms and
                 Problems (F.2.2): {\bf Sorting and searching};
                 Information Systems --- Database Management --- Logical
                 Design (H.2.1): {\bf Data models}; Data --- Files
                 (E.5): {\bf Sorting/searching}; Information Systems ---
                 Database Management --- Systems (H.2.4): {\bf Query
                 processing}",
}

%%% SIGMOD '90 paper (crossref = Garcia-Molina:1990:PAS): compares spatial
%%% query evaluation in native space vs.\ parameter space for a z-order
%%% based spatial access method (see abstract).  Entry data unchanged.
@InProceedings{Orenstein:1990:CSQ,
  author =       "Jack Orenstein",
  title =        "A comparison of spatial query processing techniques
                 for native and parameter spaces",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "343--352",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p343-orenstein/p343-orenstein.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p343-orenstein/",
  abstract =     "Spatial queries can be evaluated in native space or in
                 a parameter space. In the latter case, data objects are
                 transformed into points and query objects are
                 transformed into search regions. The requirement for
                 different data and query representations may prevent
                 the use of parameter-space searching in some
                 applications. Native-space and parameter-space
                 searching are compared in the context of a z
                 order-based spatial access method. Experimental results
                 show that when there is a single query object,
                 searching in parameter space can be faster than
                 searching in native space, if the data and query
                 objects are large enough, and if sufficient redundancy
                 is used for the query representation. The result is,
                 however, less accurate than the native space result.
                 When there are multiple query objects, native-space
                 searching is better initially, but as the number of
                 query objects increases, parameter space searching with
                 low redundancy is superior. Native-space searching is
                 much more accurate for multiple-object queries.",
  acknowledgement = ack-nhfb,
  generalterms = "Design; Experimentation; Performance",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Query processing}; Theory of
                 Computation --- Analysis of Algorithms and Problem
                 Complexity --- Nonnumerical Algorithms and Problems
                 (F.2.2): {\bf Sorting and searching}; Data --- Files
                 (E.5): {\bf Sorting/searching}; Information Systems ---
                 Database Management --- Logical Design (H.2.1): {\bf
                 Data models}",
}

%%% PODS '90 paper (crossref = ACM:1990:PPN, the PODS 1990 proceedings,
%%% unlike the neighboring SIGMOD entries): recovery of token state in
%%% data-migration systems via a unilateral commit protocol (UCP); see
%%% abstract.  Entry data unchanged.
@InProceedings{Tam:1990:TTM,
  author =       "Va-On Tam and Meichun Hsu",
  title =        "Token transactions: managing fine-grained migration of
                 data",
  crossref =     "ACM:1990:PPN",
  pages =        "344--356",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/pods/298514/p344-tam/p344-tam.pdf;
                 http://www.acm.org/pubs/citations/proceedings/pods/298514/p344-tam/",
  abstract =     "Executing a transaction in a conventional distributed
                 database system involves the execution of several
                 subtransactions, each at a remote site where the data
                 reside and running a two-phase commit protocol at the
                 end of the transaction. With the advent of fast
                 communication networks, we consider an alternative
                 paradigm where the remote data being accessed are
                 dynamically {\em migrated\/} to the initiation site of
                 the transaction. One example of such a system is a
                 distributed shared virtual memory system. \par

                 In this paper, we examine the problem of recovery from
                 system failure in data migration systems. Most data
                 migration systems use the notion of {\em tokens\/} for
                 the access rights a site has on the data elements it
                 caches. Our goal is to recover the site's knowledge of
                 the set of tokens it owned when a system failure
                 occurred. Our approach is to consider the token
                 knowledge at each site as a fragment of a global {\em
                 token database\/} and the data migration activities as
                 {\em token transactions\/} that update this distributed
                 database. We have developed a unique commit protocol
                 for token transactions, called {\em unilateral
                 commit\/} (UCP), that efficiently achieves consistency
                 and recoverability of the token state. The correctness
                 of UCP with respect to the two-phase commit protocol is
                 also presented.",
  acknowledgement = ack-nhfb,
  classification = "C6160B (Distributed DBMS)",
  corpsource =   "Aiken Comput. Lab., Harvard Univ., Cambridge, MA,
                 USA",
  generalterms = "Algorithms; Design; Management; Performance;
                 Reliability; Standardization; Theory; Verification",
  keywords =     "commit protocol; correctness; data migration systems;
                 distributed database system; distributed databases;
                 distributed shared virtual memory system; fast
                 communication networks; fine-grained migration;
                 protocols; recovery; remote data; system failure;
                 system recovery; token database; token knowledge; token
                 transactions; transaction processing; two-phase commit
                 protocol; unilateral commit; virtual storage",
  sponsororg =   "SIGACT; SIGMOD; SIGART",
  subject =      "Information Systems --- Database Management ---
                 Systems (H.2.4): {\bf Transaction processing};
                 Information Systems --- Database Management --- Systems
                 (H.2.4): {\bf Distributed databases}; Computer Systems
                 Organization --- Computer-Communication Networks ---
                 Network Protocols (C.2.2); Information Systems ---
                 Database Management --- Database Administration
                 (H.2.7): {\bf Logging and recovery}",
  treatment =    "P Practical",
}

@InProceedings{Lomet:1990:PMA,
  author =       "David Lomet and Betty Salzberg",
  title =        "The performance of a multiversion access method",
  crossref =     "Garcia-Molina:1990:PAS",
  pages =        "353--363",
  year =         "1990",
  bibdate =      "Wed Oct 25 12:40:13 MDT 2000",
  bibsource =    "http://www.acm.org/pubs/contents/proceedings/series/sigmod_pods/;
                 https://www.math.utah.edu/pub/tex/bib/pods.bib",
  URL =          "http://www.acm.org/pubs/articles/proceedings/mod/93597/p353-lomet/p353-lomet.pdf;
                 http://www.acm.org/pubs/citations/proceedings/mod/93597/p353-lomet/",
  abstract =     "The {\em Time-Split B-tree\/} is an integrated index
                 structure for a versioned timestamped database. It
                 gradually migrates data from a current database to an
                 historical database, records migrating when nodes
                 split. Records valid at the split time are placed in
                 both an historical node and a current node. This
                 impl