@Preamble{
"\ifx \undefined \Dbar \def \Dbar {\leavevmode\raise0.2ex\hbox{--}\kern-0.5emD} \fi" #
"\ifx \undefined \dbar \def \dbar {\leavevmode\raise0.2ex\hbox{--}\kern-0.5emd} \fi" #
"\ifx \undefined \ocirc \def \ocirc #1{{\accent'27#1}} \fi" #
"\ifx \undefined \varvec \def \varvec #1{\hbox{\boldmath $#1$}} \fi"
}
@String{ack-nhfb = "Nelson H. F. Beebe,
University of Utah,
Department of Mathematics, 110 LCB,
155 S 1400 E RM 233,
Salt Lake City, UT 84112-0090, USA,
Tel: +1 801 581 5254,
FAX: +1 801 581 4148,
e-mail: \path|beebe@math.utah.edu|,
\path|beebe@acm.org|,
\path|beebe@computer.org| (Internet),
URL: \path|https://www.math.utah.edu/~beebe/|"}
@String{j-VLDB-J = "VLDB Journal: Very Large Data Bases"}
@Article{Breitbart:1992:TMI,
author = "Yuri Breitbart and Abraham Silberschatz and Glenn R.
Thompson",
title = "Transaction Management Issues in a Failure-Prone
Multidatabase System Environment",
journal = j-VLDB-J,
volume = "1",
number = "1",
pages = "1--39",
month = jul,
year = "1992",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:23 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb1.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Breitbart:Yuri.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Silberschatz:Abraham.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Thompson:Glenn_R=.html",
abstract = "This paper is concerned with the problem of
integrating a number of existing, off-the-shelf local
database systems into a multidatabase system that
maintains consistency in the face of concurrency and
failures. The major difficulties in designing such
systems stem from the requirements that local
transactions be allowed to execute outside the
multidatabase system control, and that the various
local database systems cannot participate in the
execution of a global commit protocol. A scheme based
on the assumption that the component local database
systems use the strict two-phase locking protocol is
developed. Two major problems are addressed: How to
ensure global transaction atomicity without the
provision of a commit protocol, and how to ensure
freedom from global deadlocks.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "algorithms; deadlock recovery; performance;
reliability; serializibility; transaction log",
xxauthor = "Yuri Breitbart and Avi Silberschatz and Glenn R.
Thompson",
xxpages = "1--40",
}
@Article{Nodine:1992:CTH,
author = "Marian H. Nodine and Stanley B. Zdonik",
title = "Cooperative Transaction Hierarchies: Transaction
Support for Design Applications",
journal = j-VLDB-J,
volume = "1",
number = "1",
pages = "41--80",
month = jul,
year = "1992",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:23 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb1.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Nodine:Marian_H=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Zdonik:Stanley_B=.html",
abstract = "Traditional atomic and nested transactions are not
always well-suited to cooperative applications, such as
design applications. Cooperative applications place
requirements on the database that may conflict with the
serializability requirement. They require transactions
to be long, possibly nested, and able to interact with
each other in a structured way. We define a transaction
framework, called a {\em cooperative transaction
hierarchy}, that allows us to relax the requirement for
atomic, serializable transactions to better support
cooperative applications. In cooperative transaction
hierarchies, we allow the correctness specification for
groups of designers to be tailored to the needs of the
application. We use {\em patterns\/} and {\em
conflicts\/} to specify the constraints imposed on a
group's history for it to be correct. We also provide
some primitives to smooth the operation of the members.
We characterize deadlocks in a cooperative transaction
hierarchy, and provide mechanisms for deadlock
detection and resolution. We examine issues associated
with failure and recovery.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "cooperation; deadlock detection; design transactions;
non-serializability; transaction hierarchies;
transaction synchronization; version management",
}
@Article{Spaccapietra:1992:MIA,
author = "Stefano Spaccapietra and Christine Parent and Yann
Dupont",
title = "Model Independent Assertions for Integration of
Heterogeneous Schemas",
journal = j-VLDB-J,
volume = "1",
number = "1",
pages = "81--126",
month = jul,
year = "1992",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:23 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb1.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Dupont:Yann.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Parent:Christine.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Spaccapietra:Stefano.html",
abstract = "Due to the proliferation of database applications, the
integration of existing databases into a distributed or
federated system is one of the major challenges in
responding to enterprises' information requirements.
Some proposed integration techniques aim at providing
database administrators (DBAs) with a view definition
language they can use to build the desired integrated
schema. These techniques leave to the DBA the
responsibility of appropriately restructuring schema
elements from existing local schemas and of solving
inter-schema conflicts. This paper investigates the
{\em assertion-based\/} approach, in which the DBA's
action is limited to pointing out corresponding
elements in the schemas and to defining the nature of
the correspondence in between. This methodology is
capable of: ensuring better integration by taking into
account additional semantic information (assertions
about links); automatically solving structural
conflicts; building the integrated schema without
requiring conforming of initial schemas; applying
integration rules to a variety of data models; and
performing view as well as database integration. This
paper presents the basic ideas underlying our approach
and focuses on resolution of structural conflicts.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "conceptual modeling; database design and integration;
distributed databases; federated databases;
heterogeneous databases; schema integration",
}
@Article{Hsiao:1992:FDSa,
author = "David K. Hsiao",
title = "Federated Databases and Systems: {Part I} --- a
Tutorial on Their Data Sharing",
journal = j-VLDB-J,
volume = "1",
number = "1",
pages = "127--179",
month = jul,
year = "1992",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:23 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb1.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Hsiao:David_K=.html",
abstract = "The issues and solutions for the interoperability of a
class of heterogeneous databases and their database
systems are expounded in two parts. Part I presents the
data-sharing issues in federated databases and systems.
Part II, which will appear in a future issue, explores
resource-consolidation issues. {\em Interoperability\/}
in this context refers to data sharing among
heterogeneous databases, and to resource consolidation
of computer hardware, system software, and support
personnel. {\em Resource consolidation\/} requires the
presence of a database system architecture which
supports the heterogeneous system software, thereby
eliminating the need for various computer hardware and
support personnel. The class of heterogeneous databases
and database systems expounded herein is termed {\em
federated}, meaning that they are joined in order to
meet certain organizational requirements and because
they require their respective application
specificities, integrity constraints, and security
requirements to be upheld. Federated databases and
systems are new. While there are no technological
solutions, there has been considerable research towards
their development. This tutorial is aimed at exposing
the need for such solutions. A taxonomy is introduced
in our review of existing research undertakings and
exploratory developments. With this taxonomy, we
contrast and compare various approaches to federating
databases and systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "attribute-based;
data-model-and-language-to-data-model-and-language
mappings; database conversion; hierarchical; network;
object-oriented; relational; schema transformation;
transaction translation",
xxpages = "127--180",
}
@Article{Breitbart:1992:OMT,
author = "Yuri Breitbart and Hector Garcia-Molina and Abraham
Silberschatz",
title = "Overview of Multidatabase Transaction Management",
journal = j-VLDB-J,
volume = "1",
number = "2",
pages = "181--240",
month = oct,
year = "1992",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:23 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb1.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Breitbart:Yuri.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Garcia=Molina:Hector.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Silberschatz:Abraham.html",
abstract = "A multidatabase system (MDBS) is a facility that
allows users access to data located in multiple
autonomous database management systems (DBMSs). In such
a system, {\em global transactions\/} are executed
under the control of the MDBS. Independently, {\em
local transactions\/} are executed under the control of
the local DBMSs. Each local DBMS integrated by the MDBS
may employ a different transaction management scheme.
In addition, each local DBMS has complete control over
all transactions (global and local) executing at its
site, including the ability to abort at any point any
of the transactions executing at its site. Typically,
no design or internal DBMS structure changes are
allowed in order to accommodate the MDBS. Furthermore,
the local DBMSs may not be aware of each other and, as
a consequence, cannot coordinate their actions. Thus,
traditional techniques for ensuring transaction
atomicity and consistency in homogeneous distributed
database systems may not be appropriate for an MDBS
environment. The objective of this article is to
provide a brief review of the most current work in the
area of multidatabase transaction management. We first
define the problem and argue that the multidatabase
research will become increasingly important in the
coming years. We then outline basic research issues in
multidatabase transaction management and review recent
results in the area. We conclude with a discussion of
open problems and practical implications of this
research.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "multidatabase; recovery; reliability; serializability;
transaction; two-level serializability",
xxauthor = "Yuri Breitbart and Hector Garcia-Molina and Avi
Silberschatz",
}
@Article{Drew:1992:TII,
author = "Pamela Drew and Roger King and Dennis Heimbigner",
title = "A Toolkit for the Incremental Implementation of
Heterogeneous Database Management Systems",
journal = j-VLDB-J,
volume = "1",
number = "2",
pages = "241--284",
month = oct,
year = "1992",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:23 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb1.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Drew:Pamela.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Heimbigner:Dennis.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/King:Roger.html",
abstract = "The integration of heterogeneous database environments
is a difficult and complex task. The A la carte
Framework addresses this complexity by providing a
reusable and extensible architecture in which a set of
heterogeneous database management systems can be
integrated. The goal is to support incremental
integration of existing database facilities into
heterogeneous, interoperative, distributed systems. The
Framework addresses the three main issues in
heterogeneous systems integration. First, it identifies
the problems in integrating heterogeneous systems.
Second, it identifies the key interfaces and parameters
required for autonomous systems to interoperate
correctly. Third, it demonstrates an approach to
integrating these interfaces in an extensible and
incremental way. The A la carte Framework provides a
set of reusable, integrating components which integrate
the major functional domains, such as transaction
management, that could or should be integrated in
heterogeneous systems. It also provides a mechanism for
capturing key characteristics of the components and
constraints which describe how the components can be
mixed and interchanged, thereby helping to reduce the
complexity of the integration process. Using this
framework, we have implemented an experimental,
heterogeneous configuration as part of the object
management work in the software engineering research
consortium, Arcadia.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database toolkits; extensible databases; heterogeneous
databases; heterogeneous transaction management;
incremental integration; open architectures;
reconfigurable architectures",
}
@Article{Hsiao:1992:FDSb,
author = "David K. Hsiao",
title = "Federated Databases and Systems: {Part II} --- a
Tutorial on Their Resource Consolidation",
journal = j-VLDB-J,
volume = "1",
number = "2",
pages = "285--310",
month = oct,
year = "1992",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:23 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb1.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Hsiao:David_K=.html",
abstract = "The issues and solutions for the interoperability of a
class of heterogeneous databases and their database
systems are expounded in two parts. Part I presented
the data-sharing issues in federated databases and
systems (Hsiao, 1992). The present article explores
resource-consolidation issues. {\em Interoperability\/}
in this context refers to data sharing among
heterogeneous databases, and to resource consolidation
of computer hardware, system software, and support
personnel. {\em Resource consolidation\/} requires the
presence of a database system architecture which
supports the heterogeneous system software, thereby
eliminating the need for various computer hardware and
support personnel. The class of heterogeneous databases
and database systems expounded herein is termed {\em
federated}, meaning that they are joined in order to
meet certain organizational requirements and because
they require their respective application
specificities, integrity constraints, and security
requirements to be upheld. Federated databases and
systems are new. While there are no technological
solutions, there has been considerable research towards
their development. This tutorial is aimed at exposing
the need for such solutions. A taxonomy is introduced
in our review of existing research undertakings and
exploratory developments. With this taxonomy, we
contrast and compare various approaches to federating
databases and systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "attribute-based;
data-model-and-language-to-data-model-and-language
mappings; database conversion; hierarchical; network;
object-oriented; relational; schema transformation;
transaction translation",
}
@Article{Yu:1993:BMB,
author = "Philip S. Yu and Douglas W. Cornell",
title = "Buffer Management Based on Return on Consumption in a
Multi-Query Environment",
journal = j-VLDB-J,
volume = "2",
number = "1",
pages = "1--37",
month = jan,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:24 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Cornell:Douglas_W=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Yu:Philip_S=.html",
abstract = "In a multi-query environment, the marginal utilities
of allocating additional buffer to the various queries
can be vastly different. The conventional approach
examines each query in isolation to determine the
optimal access plan and the corresponding locality set.
This can lead to performance that is far from optimal.
As each query can have different access plans with
dissimilar locality sets and sensitivities to memory
requirement, we employ the concepts of memory
consumption and return on consumption (ROC) as the
basis for memory allocations. Memory consumption of a
query is its space-time product, while ROC is a measure
of the effectiveness of response-time reduction through
additional memory consumption. A global optimization
strategy using simulated annealing is developed, which
minimizes the average response over all queries under
the constraint that the total memory consumption rate
has to be less than the buffer size. It selects the
optimal join method and memory allocation for all query
types simultaneously. By analyzing the way the optimal
strategy makes memory allocations, a heuristic
threshold strategy is then proposed. The threshold
strategy is based on the concept of ROC. As the memory
consumption rate by all queries is limited by the
buffer size, the strategy tries to allocate the memory
so as to make sure that a certain level of ROC is
achieved. A simulation model is developed to
demonstrate that the heuristic strategy yields
performance that is very close to the optimal strategy
and is far superior to the conventional allocation
strategy.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "buffer management; join methods; query optimization;
queueing model; simulated annealing; simulation",
xxpages = "1--38",
}
@Article{Harder:1993:CCI,
author = "Theo H{\"a}rder and Kurt Rothermel",
title = "Concurrency Control Issues in Nested Transactions",
journal = j-VLDB-J,
volume = "2",
number = "1",
pages = "39--74",
month = jan,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:24 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/H=auml=rder:Theo.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Rothermel:Kurt.html",
abstract = "The concept of nested transactions offers more
decomposable execution units and finer-grained control
over concurrency and recovery than `flat' transactions.
Furthermore, it supports the decomposition of a `unit
of work' into subtasks and their appropriate
distribution in a computer system as a prerequisite of
intratransaction parallelism. However, to exploit its
full potential, suitable granules of concurrency
control as well as access modes for shared data are
necessary. In this article, we investigate various
issues of concurrency control for nested transactions.
First, the mechanisms for cooperation and communication
within nested transactions should not impede parallel
execution of transactions among parent and children or
among siblings. Therefore, a model for nested
transactions is proposed allowing for effective
exploitation of intra-transaction parallelism. Starting
with a set of basic locking rules, we introduce the
concept of `downward inheritance of locks' to make data
manipulated by a parent available to its children. To
support supervised and restricted access, this concept
is refined to `controlled downward inheritance.' The
initial concurrency control scheme was based on S-X
locks for `flat,' non-overlapping data objects. In
order to adjust this scheme for practical applications,
a set of concurrency control rules is derived for
generalized lock modes described by a compatibility
matrix. Also, these rules are combined with a
hierarchical locking scheme to improve selective access
to data granules of varying sizes. After having tied
together both types of hierarchies (transaction and
object), it can be shown how `controlled downward
inheritance' for hierarchical objects is achieved in
nested transactions. Finally, problems of deadlock
detection and resolution in nested transactions are
considered.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; locking; nested transactions;
object hierarchies",
}
@Article{Jensen:1993:UDT,
author = "Christian S. Jensen and Leo Mark and Nick Roussopoulos
and Timos K. Sellis",
title = "Using Differential Techniques to Efficiently Support
Transaction Time",
journal = j-VLDB-J,
volume = "2",
number = "1",
pages = "75--116",
month = jan,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:24 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/j/Jensen:Christian_S=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mark:Leo.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Roussopoulos:Nick.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sellis:Timos_K=.html",
abstract = "We present an architecture for query processing in the
relational model extended with transaction time. The
architecture integrates standard query optimization and
computation techniques with new differential
computation techniques. Differential computation
computes a query incrementally or decrementally from
the cached and indexed results of previous
computations. The use of differential computation
techniques is essential in order to provide efficient
processing of queries that access very large temporal
relations. Alternative query plans are integrated into
a state transition network, where the state space
includes backlogs of base relations, cached results
from previous computations, a cache index, and
intermediate results; the transitions include standard
relational algebra operators, operators for
constructing differential files, operators for
differential computation, and combined operators. A
rule set is presented to prune away parts of state
transition networks that are not promising, and dynamic
programming techniques are used to identify the optimal
plans from the remaining state transition networks. An
extended logical access path serves as a `structuring'
index on the cached results and contains, in addition,
vital statistics for the query optimization process
(including statistics about base relations, backlogs,
and queries---previously computed and cached,
previously computed, or just previously estimated).",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "efficient query processing; incremental and
decremental computation; temporal databases;
transaction time",
}
@Article{Haritsa:1993:VBS,
author = "Jayant R. Haritsa and Michael J. Carey and Miron
Livny",
title = "Value-Based Scheduling in Real-Time Database Systems",
journal = j-VLDB-J,
volume = "2",
number = "2",
pages = "117--152",
month = apr,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:25 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Carey:Michael_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Haritsa:Jayant_R=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Livny:Miron.html",
abstract = "In a real-time database system, an application may
assign a {\em value\/} to a transaction to reflect the
return it expects to receive if the transaction commits
before its deadline. Most research on real-time
database systems has focused on systems where all
transactions are assigned the same value, the
performance goal being to minimize the number of missed
deadlines. When transactions are assigned different
values, the goal of the system shifts to maximizing the
sum of the values of those transactions that commit by
their deadlines. Minimizing the number of missed
deadlines becomes a secondary concern. In this article,
we address the problem of establishing a priority
ordering among transactions characterized by both
values and deadlines that results in maximizing the
realized value. Of particular interest is the tradeoff
established between these values and deadlines in
constructing the priority ordering. Using a detailed
simulation model, we evaluate the performance of
several priority mappings that make this tradeoff in
different, but fixed, ways. In addition, a `bucket'
priority mechanism that allows the relative importance
of values and deadlines to be controlled is introduced
and studied. The notion of associating a penalty with
transactions whose deadlines are not met is also
briefly considered.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "priority and concurrency algorithms; priority mapping;
resource and data contention; transaction values and
deadlines",
}
@Article{Grant:1993:QLR,
author = "John Grant and Witold Litwin and Nick Roussopoulos and
Timos K. Sellis",
title = "Query Languages for Relational Multidatabases",
journal = j-VLDB-J,
volume = "2",
number = "2",
pages = "153--171",
month = apr,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:25 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Grant:John.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Litwin:Witold.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Roussopoulos:Nick.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sellis:Timos_K=.html",
abstract = "With the existence of many autonomous databases widely
accessible through computer networks, users will
require the capability to jointly manipulate data in
different databases. A multidatabase system provides
such a capability through a multidatabase manipulation
language, such as MSQL. We propose a theoretical
foundation for such languages by presenting a
multirelational algebra and calculus based on the
relational algebra and calculus. The proposal is
illustrated by various queries on an example
multidatabase. It is shown that properties of the
multirelational algebra may be used for optimization
and that every multirelational algebra query can be
expressed as a multirelational calculus query. The
connection between the multirelational languages and
MSQL, the multidatabase version of SQL, is also
investigated.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "multidatabase; multirelational algebra;
multirelational calculus; query optimization",
xxpages = "153--172",
}
@Article{Neufeld:1993:GCT,
author = "Andrea Neufeld and Guido Moerkotte and Peter C.
Lockemann",
title = "Generating Consistent Test Data for a Variable Set of
General Consistency Constraints",
journal = j-VLDB-J,
volume = "2",
number = "2",
pages = "173--213",
month = apr,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:25 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lockemann:Peter_C=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Moerkotte:Guido.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Neufeld:Andrea.html",
abstract = "To address the problem of generating test data for a
set of general consistency constraints, we propose a
new two-step approach: First the interdependencies
between consistency constraints are explored and a
generator formula is derived on their basis. During its
creation, the user may exert control. In essence, the
generator formula contains information to restrict the
search for consistent test databases. In the second
step, the test database is generated. Here, two
different approaches are proposed. The first adapts an
already published approach to generating finite models
by enhancing it with requirements imposed by test data
generation. The second, a new approach, operationalizes
the generator formula by translating it into a sequence
of operators, and then executes it to construct the
test database. For this purpose, we introduce two
powerful operators: the generation operator and the
test-and-repair operator. This approach also allows for
enhancing the generation operators with heuristics for
generating facts in a goal-directed fashion. It avoids
the generation of test data that may contradict the
consistency constraints, and limits the search space
for the test data. This article concludes with a
careful evaluation and comparison of the performance of
the two approaches and their variants by describing a
number of benchmarks and their results.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "consistency; design; logic; test data; validation",
xxpages = "173--214",
xxtitle = "Generating consistent test data: restricting the
search space by a generator formula",
}
@Article{Du:1993:SCU,
author = "Weimin Du and Ahmed K. Elmagarmid and Won Kim and
Omran A. Bukhres",
title = "Supporting Consistent Updates in Replicated
Multidatabase Systems",
journal = j-VLDB-J,
volume = "2",
number = "2",
pages = "215--241",
month = apr,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:25 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Bukhres:Omran_A=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Du:Weimin.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/e/Elmagarmid:Ahmed_K=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kim:Won.html",
abstract = "Replication is useful in multidatabase systems (MDBSs)
because, as in traditional distributed database
systems, it increases data availability in the presence
of failures and decreases data retrieval costs by
reading local or close copies of data. Concurrency
control, however, is more difficult in replicated MDBSs
than in ordinary distributed database systems. This is
the case not only because local concurrency controllers
may schedule global transactions inconsistently, but
also because local transactions (at different sites)
may access the same replicated data. In this article,
we propose a decentralized concurrency control protocol
for a replicated MDBS. The proposed strategy supports
prompt and consistent updates of replicated data by
both local and global applications without a central
coordinator.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; multidatabases; replica control;
replicated data management; resolvable conflicts;
serializability",
}
@Article{Anonymous:1993:Ca,
author = "Anonymous",
title = "Column",
journal = j-VLDB-J,
volume = "2",
number = "2",
pages = "??--??",
month = apr,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:25 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Anonymous:1993:Cb,
author = "Anonymous",
title = "Column",
journal = j-VLDB-J,
volume = "2",
number = "2",
pages = "??--??",
month = apr,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:25 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Tomasic:1993:SIP,
author = "Anthony Tomasic and Hector Garcia-Molina",
title = "Special Issue in Parallelism in Database Systems:
Query Processing and Inverted Indices in Shared-Nothing
Document Information Retrieval Systems",
journal = j-VLDB-J,
volume = "2",
number = "3",
pages = "243--275",
month = jul,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:01 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Garcia=Molina:Hector.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tomasic:Anthony.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Tomasic:1993:QPI,
author = "Anthony Tomasic and Hector Garcia-Molina",
title = "Query processing and inverted indices in shared:
nothing text document information retrieval systems",
journal = j-VLDB-J,
volume = "2",
number = "3",
pages = "243--276",
month = jul,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:26 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The performance of distributed text document retrieval
systems is strongly influenced by the organization of
the inverted text. This article compares the
performance impact on query processing of various
physical organizations for inverted lists. We present a
new probabilistic model of the database and queries.
Simulation experiments determine those variables that
most strongly influence response time and throughput.
This leads to a set of design trade-offs over a wide
range of hardware configurations and new parallel query
processing strategies.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "file organization; full text information retrieval;
inverted file; inverted index; performance; query
processing; shared-nothing; striping",
}
@Article{Ziane:1993:PQP,
author = "Mikal Ziane and Mohamed Za{\"\i}t and Pascale
Borla-Salamet",
title = "Parallel Query Processing with Zigzag Trees",
journal = j-VLDB-J,
volume = "2",
number = "3",
pages = "277--301",
month = jul,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:26 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Borla=Salamet:Pascale.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Za=iuml=t:Mohamed.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Ziane:Mikal.html",
abstract = "In this article, we describe our approach to the
compile-time optimization and parallelization of
queries for execution in DBS3 or EDS. DBS3 is a
shared-memory parallel database system, while the EDS
system has a distributed-memory architecture. Because
DBS3 implements a parallel dataflow execution model,
this approach applies to both architectures. Using
randomized search strategies enables the exploration of
a search space large enough to include zigzag trees,
which are intermediate between left-deep and right-deep
trees. Zigzag trees are shown to provide better
response time than right-deep trees in case of limited
memory. Performance measurements obtained using the
DBS3 prototype show the advantages of zigzag trees
under various conditions.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "cost function; fragmentation; pipeline; search space",
xxpages = "277--302",
}
@Article{Hua:1993:CDS,
author = "Kien A. Hua and Yu-lung Lo and Honesty C. Young",
title = "Considering Data Skew Factor in Multi-Way Join Query
Optimization for Parallel Execution",
journal = j-VLDB-J,
volume = "2",
number = "3",
pages = "303--330",
month = jul,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:26 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Hua:Kien_A=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lo:Yu=lung.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Young:Honesty_C=.html",
abstract = "A consensus on parallel architecture for very large
database management has emerged. This architecture is
based on a shared-nothing hardware organization. The
computation model is very sensitive to skew in tuple
distribution, however. Recently, several parallel join
algorithms with dynamic load balancing capabilities
have been proposed to address this issue, but none of
them consider multi-way join problems. In this article
we propose a dynamic load balancing technique for
multi-way joins, and investigate the effect of load
balancing on query optimization. In particular, we
present a join-ordering strategy that takes
load-balancing issues into consideration. Our
performance study indicates that the proposed query
optimization technique can provide very impressive
performance improvement over conventional approaches.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "load balancing; multi-way join; parallel-database
computer; query optimization",
xxauthor = "Kien A. Hua and Yo Lung Lo and Honesty C. Young",
}
@Article{Zhang:1993:TGC,
author = "Aidong Zhang and Ahmed K. Elmagarmid",
title = "A Theory of Global Concurrency Control in
Multidatabase Systems",
journal = j-VLDB-J,
volume = "2",
number = "3",
pages = "331--360",
month = jul,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:26 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/e/Elmagarmid:Ahmed_K=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Zhang:Aidong.html",
abstract = "This article presents a theoretical basis for global
concurrency control to maintain global serializability
in multidatabase systems. Three correctness criteria
are formulated that utilize the intrinsic
characteristics of global transactions to determine the
serialization order of global subtransactions at each
local site. In particular, two new types of
serializability, chain-conflicting serializability and
sharing serializability, are proposed and hybrid
serializability, which combines these two basic
criteria, is discussed. These criteria offer the
advantage of imposing no restrictions on local sites
other than local serializability while retaining global
serializability. The graph testing techniques of the
three criteria are provided as guidance for global
transaction scheduling. In addition, an optimal
property of global transactions for determinating the
serialization order of global subtransactions at local
sites is formulated. This property defines the upper
limit on global serializability in multidatabase
systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "chain-conflicting serializability; hybrid
serializability; optimality; sharing serializability",
}
@Article{Anonymous:1993:SIP,
author = "Anonymous",
title = "Special issue in parallelism in database systems",
journal = j-VLDB-J,
volume = "2",
number = "3",
pages = "??--??",
month = jul,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:26 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Srinivasan:1993:PBT,
author = "V. Srinivasan and Michael J. Carey",
title = "Performance of {B$^+$} tree concurrency control
algorithms",
journal = j-VLDB-J,
volume = "2",
number = "4",
pages = "361--406",
month = oct,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:27 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Carey:Michael_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Srinivasan:V=.html",
abstract = "A number of algorithms have been proposed to access
B$^+$-trees concurrently, but they are not well
understood. In this article, we study the performance
of various B$^+$-tree concurrency control algorithms
using a detailed simulation model of B$^+$-tree
operations in a centralized DBMS. Our study covers a
wide range of data contention situations and resource
conditions. In addition, based on the performance of
the set of B$^+$-tree concurrency control algorithms,
which includes one new algorithm, we make projections
regarding the performance of other algorithms in the
literature. Our results indicate that algorithms with
updaters that lock-couple using exclusive locks perform
poorly as compared to those that permit more optimistic
index descents. In particular, the B-link algorithms
are seen to provide the most concurrency and the best
overall performance. Finally, we demonstrate the need
for a highly concurrent long-term lock holding strategy
to obtain the full benefits of a highly concurrent
algorithm for index operations.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "B+-tree structures; data contention; lock modes;
performance; resource conditions; simulation models;
workload parameters",
xxtitle = "Performance of {B+} Tree Concurrency Algorithms",
}
@Article{Weikum:1993:MLT,
author = "Gerhard Weikum and Christof Hasse",
title = "Multi-Level Transaction Management for Complex
Objects: Implementation, Performance, Parallelism",
journal = j-VLDB-J,
volume = "2",
number = "4",
pages = "407--453",
month = oct,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:27 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Hasse:Christof.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Weikum:Gerhard.html",
abstract = "Multi-level transactions are a variant of open-nested
transactions in which the subtransactions correspond to
operations at different levels of a layered system
architecture. They allow the exploitation of semantics
of high-level operations to increase concurrency. As a
consequence, undoing a transaction requires
compensation of completed subtransactions. In addition,
multi-level recovery methods must take into
consideration that high-level operations are not
necessarily atomic if multiple pages are updated in a
single subtransaction. This article presents algorithms
for multi-level transaction management that are
implemented in the database kernel system (DASDBS). In
particular, we show that multi-level recovery can be
implemented in an efficient way. We discuss performance
measurements using a synthetic benchmark for processing
complex objects in a multi-user environment. We show
that multi-level transaction management can be extended
easily to cope with parallel subtransactions within a
single transaction. Performance results are presented
with varying degrees of inter- and intratransaction
parallelism.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "atomicity; complex objects; inter- and
intratransaction parallelism; multi-level transactions;
performance; persistence; recovery",
xxpages = "407--454",
}
@Article{Storey:1993:USR,
author = "Veda C. Storey",
title = "Understanding Semantic Relationships",
journal = j-VLDB-J,
volume = "2",
number = "4",
pages = "455--488",
month = oct,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:27 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Storey:Veda_C=.html",
abstract = "To develop sophisticated database management systems,
there is a need to incorporate more understanding of
the real world in the information that is stored in a
database. Semantic data models have been developed to
try to capture some of the meaning, as well as the
structure, of data using abstractions such as
inclusion, aggregation, and association. Besides these
well-known relationships, a number of additional
semantic relationships have been identified by
researchers in other disciplines such as linguistics,
logic, and cognitive psychology. This article explores
some of the lesser-recognized semantic relationships
and discusses both how they could be captured, either
manually or by using an automated tool, and their
impact on database design. To demonstrate the
feasibility of this research, a prototype system for
analyzing semantic relationships, called the Semantic
Relationship Analyzer, is presented.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database design; database design systems;
entity-relationship model; relational model; semantic
relationships",
}
@Article{Tseng:1993:SMS,
author = "Frank Shou-Cheng Tseng and Arbee L. P. Chen and W.-P.
Yang",
title = "Searching a Minimal Semantically-Equivalent Subset of
a Set of Partial Values",
journal = j-VLDB-J,
volume = "2",
number = "4",
pages = "489--512",
month = oct,
year = "1993",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:27 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb2.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chen:Arbee_L=_P=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tseng:Frank_Shou=Cheng.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Yang:W==P=.html",
abstract = "Imprecise data exist in databases due to their
unavailability or to data/schema incompatibilities in a
multidatabase system. Partial values have been used to
represent imprecise data. Manipulation of partial
values is therefore necessary to process queries
involving imprecise data. In this article, we study the
problem of eliminating redundant partial values that
result from a projection on an attribute with partial
values. The redundancy of partial values is defined
through the interpretation of a set of partial values.
This problem is equivalent to searching a minimal
semantically-equivalent subset of a set of partial
values. A semantically-equivalent subset contains
exactly the same information as the original set. We
derive a set of useful properties and apply a graph
matching technique to develop an efficient algorithm
for searching such a minimal subset and therefore
eliminating redundant partial values. By this process,
we not only provide a concise answer to the user, but
also reduce the communication cost when partial values
are requested to be transmitted from one site to
another site in a distributed environment. Moreover,
further manipulation of the partial values can be
simplified. This work is also extended to the case of
multi-attribute projections.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "bipartite graph; graph matching; imprecise data;
minimal elements; multidatabase systems; partial
values",
xxauthor = "Frank S. C. Tseng and Arbee L. P. Chen and Wei Pang
Yang",
}
@Article{Georgakopoulos:1994:CST,
author = "Dimitrios Georgakopoulos and Marek Rusinkiewicz and
Witold Litwin",
title = "Chronological Scheduling of Transactions with Temporal
Dependencies",
journal = j-VLDB-J,
volume = "3",
number = "1",
pages = "1--28",
month = jan,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:28 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Georgakopoulos:Dimitrios.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Litwin:Witold.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Rusinkiewicz:Marek.html",
abstract = "Database applications often impose temporal
dependencies between transactions that must be
satisfied to preserve data consistency. The extant
correctness criteria used to schedule the execution of
concurrent transactions are either time independent or
use strict, difficult to satisfy real-time constraints.
On one end of the spectrum, serializability completely
ignores time. On the other end, deadline scheduling
approaches consider the outcome of each transaction
execution correct only if the transaction meets its
real-time deadline. In this article, we explore new
correctness criteria and scheduling methods that
capture temporal transaction dependencies and belong to
the broad area between these two extreme approaches. We
introduce the concepts of {\em succession dependency\/}
and {\em chronological dependency\/} and define
correctness criteria under which temporal dependencies
between transactions are preserved even if the
dependent transactions execute concurrently. We also
propose a {\em chronological scheduler\/} that can
guarantee that transaction executions satisfy their
chronological constraints. The advantages of
chronological scheduling over traditional scheduling
methods, as well as the main issues in the
implementation and performance of the proposed
scheduler, are discussed.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrent succession; execution correctness; partial
rollbacks; synchronization; transaction ordering",
}
@Article{Whang:1994:DMD,
author = "Kyu Young Whang and Sang Wook Kim and Gio
Wiederhold",
title = "Dynamic Maintenance of Data Distribution for
Selectivity Estimation",
journal = j-VLDB-J,
volume = "3",
number = "1",
pages = "29--51",
month = jan,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:28 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kim:Sang=Wook.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Whang:Kyu=Young.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Wiederhold:Gio.html",
abstract = "We propose a new dynamic method for multidimensional
selectivity estimation for range queries that works
accurately independent of data distribution. Good
estimation of selectivity is important for query
optimization and physical database design. Our method
employs the multilevel grid file (MLGF) for accurate
estimation of multidimensional data distribution. The
MLGF is a dynamic, hierarchical, balanced,
multidimensional file structure that gracefully adapts
to nonuniform and correlated distributions. We show
that the MLGF directory naturally represents a
multidimensional data distribution. We then extend it
for further refinement and present the selectivity
estimation method based on the MLGF. Extensive
experiments have been performed to test the accuracy of
selectivity estimation. The results show that
estimation errors are very small independent of
distributions, even with correlated and/or highly
skewed ones. Finally, we analyze the cause of errors in
estimation and investigate the effects of various
parameters on the accuracy of estimation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "multidimensional file structure; multilevel grid
files; physical database design; query optimization",
}
@Article{Kamel:1994:PBO,
author = "Nabil Kamel and Ping Wu and Stanley Y. W. Su",
title = "A Pattern-Based Object Calculus",
journal = j-VLDB-J,
volume = "3",
number = "1",
pages = "53--76",
month = jan,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:28 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kamel:Nabil.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Su:Stanley_Y=_W=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Wu:Ping.html",
abstract = "Several object-oriented database management systems
have been implemented without an accompanying
theoretical foundation for constraint, query
specification, and processing. The pattern-based object
calculus presented in this article provides such a
theoretical foundation for describing and processing
object-oriented databases. We view an object-oriented
database as a network of interrelated classes (i.e.,
the intension) and a collection of time-varying object
association patterns (i.e., the extension). The object
calculus is based on first-order logic. It provides the
formalism for interpreting precisely and uniformly the
semantics of queries and integrity constraints in
object-oriented databases. The power of the object
calculus is shown in four aspects. First, associations
among objects are expressed explicitly in an
object-oriented database. Second, the `nonassociation'
operator is included in the object calculus. Third,
set-oriented operations can be performed on both
homogeneous and heterogeneous object association
patterns. Fourth, our approach does not assume a
specific form of database schema. A proposed formalism
is also applied to the design of high-level
object-oriented query and constraint languages.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "association patterns; Object-oriented databases; query
expressions; semantic constraints",
}
@Article{Sciore:1994:VCM,
author = "Edward Sciore",
title = "Versioning and Configuration Management in an
Object-Oriented Data Model",
journal = j-VLDB-J,
volume = "3",
number = "1",
pages = "77--106",
month = jan,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:28 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sciore:Edward.html",
abstract = "Many database applications require the storage and
manipulation of different versions of data objects. To
satisfy the diverse needs of these applications,
current database systems support versioning at a very
low level. This article demonstrates that
application-independent versioning can be supported at
a significantly higher level. In particular, we extend
the EXTRA data model and EXCESS query language so that
configurations can be specified conceptually and
non-procedurally. We also show how version sets can be
viewed multidimensionally, thereby allowing
configurations to be expressed at a higher level of
abstraction. The resulting model integrates and
generalizes ideas in CAD systems, CASE systems, and
temporal databases.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "EXTRA/EXCESS data models; generic and specific
references; query language; semantically based
configuration specifications",
}
@Article{Ramamohanarao:1994:IDD,
author = "Kotagiri Ramamohanarao and James Harland",
title = "An introduction to deductive database languages and
systems",
journal = j-VLDB-J,
volume = "3",
number = "2",
pages = "107--122",
month = apr,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:29 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Ramamohanarao:1994:SIP,
author = "Kotagiri Ramamohanarao and James Harland",
title = "Special Issue on Prototypes of Deductive Database
Systems: An Introduction to Deductive Database
Languages and Systems",
journal = j-VLDB-J,
volume = "3",
number = "2",
pages = "107--122",
month = apr,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:01 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Harland:James.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ramamohanarao:Kotagiri.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Derr:1994:GND,
author = "Marcia A. Derr and Shinichi Morishita and Geoffrey
Phipps",
title = "The Glue-Nail Deductive Database System: Design,
Implementation, and Evaluation",
journal = j-VLDB-J,
volume = "3",
number = "2",
pages = "123--160",
month = apr,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:29 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Derr:Marcia_A=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Morishita:Shinichi.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Phipps:Geoffrey.html",
abstract = "We describe the design and implementation of the
Glue-Nail deductive database system. Nail is a purely
declarative query language; Glue is a procedural
language used for non-query activities. The two
languages combined are sufficient to write a complete
application. Nail and Glue code are both compiled into
the target language IGlue. The Nail compiler uses
variants of the magic sets algorithm and supports
well-founded models. The Glue compiler's static
optimizer uses peephole techniques and data flow
analysis to improve code. The IGlue interpreter
features a run-time adaptive optimizer that reoptimizes
queries and automatically selects indexes. We also
describe the Glue-Nail benchmark suite, a set of
applications developed to evaluate the Glue-Nail
language and to measure the performance of the
system.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "language; performance; query optimization",
}
@Article{Ramakrishnan:1994:CDS,
author = "Raghu Ramakrishnan and Divesh Srivastava and S.
Sudarshan and Praveen Seshadri",
title = "The {CORAL} Deductive System",
journal = j-VLDB-J,
volume = "3",
number = "2",
pages = "161--210",
month = apr,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:29 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ramakrishnan:Raghu.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Seshadri:Praveen.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Srivastava:Divesh.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sudarshan:S=.html",
abstract = "CORAL is a deductive system that supports a rich
declarative language, and an interface to C++, which
allows for a combination of declarative and imperative
programming. A CORAL declarative program can be
organized as a collection of interacting modules. CORAL
supports a wide range of evaluation strategies, and
automatically chooses an efficient strategy for each
module in the program. Users can guide query
optimization by selecting from a wide range of control
choices. The CORAL system provides imperative
constructs to update, insert, and delete facts. Users
can program in a combination of declarative CORAL and
C++ extended with CORAL primitives. A high degree of
extensibility is provided by allowing C++ programmers
to use the class structure of C++ to enhance the CORAL
implementation. CORAL provides support for main-memory
data and, using the EXODUS storage manager,
disk-resident data. We present a comprehensive view of
the system from broad design goals, the language, and
the architecture, to language interfaces and
implementation details.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "deductive database; logic programming system; query
language",
}
@Article{Kiessling:1994:DSE,
author = "Werner Kie{\ss}ling and Helmut Schmidt and Werner
Strau{\ss} and Gerhard D{\"u}nzinger",
title = "{DECLARE} and {SDS}: Early Efforts to Commercialize
Deductive Database Technology",
journal = j-VLDB-J,
volume = "3",
number = "2",
pages = "211--243",
month = apr,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:29 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/D=uuml=nzinger:Gerhard.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kie=szlig=ling:Werner.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Schmidt:Helmut.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Strau=szlig=:Werner.html",
abstract = "The Smart Data System (SDS) and its declarative query
language, Declarative Reasoning, represent the first
large-scale effort to commercialize deductive database
technology. SDS offers the functionality of deductive
reasoning in a distributed, heterogeneous database
environment. In this article we discuss several
interesting aspects of the query compilation and
optimization process. The emphasis is on the query
execution plan data structure and its transformations
by the optimizing rule compiler. Through detailed case
studies we demonstrate that efficient and very compact
runtime code can be generated. We also discuss our
experiences gained from a large pilot application (the
MVV-expert) and report on several issues of practical
interest in engineering such a complex system,
including the migration from Lisp to C. We argue that
heuristic knowledge and control should be made an
integral part of deductive databases.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "declarative reasoning; distributed query processing;
heuristic control; multi-databases; productization;
query optimizer",
}
@Article{Vaghani:1994:ADD,
author = "Jayen Vaghani and Kotagiri Ramamohanarao and David B.
Kemp and Zoltan Somogyi and Peter J. Stuckey and Tim
S. Leask and James Harland",
title = "The {Aditi} Deductive Database System",
journal = j-VLDB-J,
volume = "3",
number = "2",
pages = "245--288",
month = apr,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:29 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Harland:James.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kemp:David_B=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Leask:Tim_S=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ramamohanarao:Kotagiri.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Somogyi:Zoltan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Stuckey:Peter_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/v/Vaghani:Jayen.html",
abstract = "Deductive databases generalize relational databases by
providing support for recursive views and non-atomic
data. Aditi is a deductive system based on the
client-server model; it is inherently multi-user and
capable of exploiting parallelism on shared-memory
multiprocessors. The back-end uses relational
technology for efficiency in the management of
disk-based data and uses optimization algorithms
especially developed for the bottom-up evaluation of
logical queries involving recursion. The front-end
interacts with the user in a logical language that has
more expressive power than relational query languages.
We present the structure of Aditi, discuss its
components in some detail, and present performance
figures.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "implementation; logic; multi-user; parallelism;
relational database",
}
@Article{Anonymous:1994:SIP,
author = "Anonymous",
title = "Special issue on prototypes of deductive database
systems",
journal = j-VLDB-J,
volume = "3",
number = "2",
pages = "??--??",
month = apr,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:29 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Lee:1994:EIV,
author = "Byung Suk Lee and Gio Wiederhold",
title = "Efficiently Instantiating View-Objects From Remote
Relational Databases",
journal = j-VLDB-J,
volume = "3",
number = "3",
pages = "289--323",
month = jul,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:30 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lee:Byung_Suk.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Wiederhold:Gio.html",
abstract = "View-objects are complex objects that are instantiated
by delivering a query to a database and converting the
query result into a nested structure. In relational
databases, query results are conventionally retrieved
as a single flat relation, which contains duplicate
subtuples in its composite tuples. These duplicate
subtuples increase the amount of data to be handled and
thus degrade performance. In this article, we describe
two new methods that retrieve a query result in
structures other than a single flat relation. One
method retrieves a set of relation fragments, and the
other retrieves a single-nested relation. We first
describe their algorithms and cost models, and then
present the cost comparison results in a client-server
architecture with a relational main memory database
residing on a server.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "client server; complex object; nested relation; query
optimization; relation fragments",
}
@Article{Barbara-Milla:1994:DPT,
author = "Daniel Barbar{\'a}-Mill{\'a} and Hector
Garcia-Molina",
title = "The demarcation protocol: a technique for maintaining
constraints in distributed database systems",
journal = j-VLDB-J,
volume = "3",
number = "3",
pages = "325--353",
month = jul,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:30 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Traditional protocols for distributed database
management have a high message overhead; restrain or
lock access to resources during protocol execution; and
may become impractical for some scenarios like
real-time systems and very large distributed databases.
In this article, we present the demarcation protocol;
it overcomes these problems by using explicit
consistency constraints as the correctness criteria.
The method establishes safe limits as `lines drawn in
the sand' for updates, and makes it possible to change
these limits dynamically, enforcing the constraints at
all times. We show how this technique can be applied to
linear arithmetic, existential, key, and approximate
copy constraints.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "consistency constraints; serializability; transaction
limits",
}
@Article{Barbara:1994:DPT,
author = "Daniel Barbar{\'a} and Hector Garcia-Molina",
title = "The Demarcation Protocol: a Technique for
Maintaining Constraints in Distributed Database
Systems",
journal = j-VLDB-J,
volume = "3",
number = "3",
pages = "325--353",
month = jul,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:01 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Barbar=aacute=:Daniel.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Garcia=Molina:Hector.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Bertino:1994:ICO,
author = "Elisa Bertino",
title = "Index Configuration in Object-Oriented Databases",
journal = j-VLDB-J,
volume = "3",
number = "3",
pages = "355--399",
month = jul,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:30 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Bertino:Elisa.html",
abstract = "In relational databases, an attribute of a relation
can have only a single primitive value, making it
cumbersome to model complex objects. The
object-oriented paradigm removes this difficulty by
introducing the notion of nested objects, which allows
the value of an object attribute to be another object
or a set of other objects. This means that a class
consists of a set of attributes, and the values of the
attributes are objects that belong to other classes;
that is, the definition of a class forms a hierarchy of
classes. All attributes of the nested classes are
nested attributes of the root of the hierarchy. A
branch of such hierarchy is called a {\em path}. In
this article, we address the problem of index
configuration for a given path. We first summarize some
basic concepts, and introduce the concept of index
configuration for a path. Then we present cost formulas
to evaluate the costs of the various configurations.
Finally, we present the algorithm that determines the
optimal configuration, and show its correctness.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "index selection; physical database design; query
optimization",
}
@Article{Guting:1994:ISD,
author = "Ralf Hartmut G{\"u}ting",
title = "An introduction to spatial database systems",
journal = j-VLDB-J,
volume = "3",
number = "4",
pages = "357--399",
month = oct,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "We propose a definition of a spatial database system
as a database system that offers spatial data types in
its data model and query language, and supports spatial
data types in its implementation, providing at least
spatial indexing and spatial join methods. Spatial
database systems offer the underlying database
technology for geographic information systems and other
applications. We survey data modeling, querying, data
structures and algorithms, and system architecture for
such systems. The emphasis is on describing known
technology in a coherent manner, rather than listing
open problems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Guting:1994:SIS,
author = "Ralf Hartmut G{\"u}ting",
title = "Special Issue on Spatial Database Systems: An
Introduction to Spatial Database Systems",
journal = j-VLDB-J,
volume = "3",
number = "4",
pages = "357--399",
month = oct,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:01 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/G=uuml=ting:Ralf_Hartmut.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Baumann:1994:MMD,
author = "Peter Baumann",
title = "Management of Multidimensional Discrete Data",
journal = j-VLDB-J,
volume = "3",
number = "4",
pages = "401--444",
month = oct,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:31 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Baumann:Peter.html",
abstract = "Spatial database management involves two main
categories of data: vector and raster data. The former
has received a lot of in-depth investigation; the
latter still lacks a sound framework. Current DBMSs
either regard raster data as pure byte sequences where
the DBMS has no knowledge about the underlying
semantics, or they do not complement array structures
with storage mechanisms suitable for huge arrays, or
they are designed as specialized systems with
sophisticated imaging functionality, but no general
database capabilities (e.g., a query language). Many
types of array data will require database support in
the future, notably 2-D images, audio data and general
signal-time series (1-D), animations (3-D), static or
time-variant voxel fields (3-D and 4-D), and the
ISO/IEC PIKS (Programmer's Imaging Kernel System)
BasicImage type (5-D). In this article, we propose a
comprehensive support of {\em multidimensional discrete
data\/} (MDD) in databases, including operations on
arrays of arbitrary size over arbitrary data types. A
set of requirements is developed, a small set of
language constructs is proposed (based on a formal
algebraic semantics), and a novel MDD architecture is
outlined to provide the basis for efficient MDD query
evaluation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "image database systems; multimedia database systems;
spatial index; tiling",
}
@Article{Chu:1994:SMA,
author = "Wesley W. Chu and Ion Tim Ieong and Ricky K. Taira",
title = "A Semantic Modeling Approach for Image Retrieval by
Content",
journal = j-VLDB-J,
volume = "3",
number = "4",
pages = "445--477",
month = oct,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:31 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chu:Wesley_W=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/i/Ieong:Ion_Tim.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Taira:Ricky_K=.html",
abstract = "We introduce a semantic data model to capture the
hierarchical, spatial, temporal, and evolutionary
semantics of images in pictorial databases. This model
mimics the user's conceptual view of the image content,
providing the framework and guidelines for
preprocessing to extract image features. Based on the
model constructs, a spatial evolutionary query language
(SEQL), which provides direct image object manipulation
capabilities, is presented. With semantic information
captured in the model, spatial evolutionary queries are
answered efficiently. Using an object-oriented
platform, a prototype medical-image management system
was implemented at UCLA to demonstrate the feasibility
of the proposed approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "image; medical; multimedia databases; spatial query
processing; temporal evolutionary query processing",
}
@Article{Papadias:1994:QRS,
author = "Dimitris Papadias and Timos K. Sellis",
title = "Qualitative Representation of Spatial Knowledge in
Two-Dimensional Space",
journal = j-VLDB-J,
volume = "3",
number = "4",
pages = "479--516",
month = oct,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:31 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Papadias:Dimitris.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sellis:Timos_K=.html",
abstract = "Various relation-based systems, concerned with the
qualitative representation and processing of spatial
knowledge, have been developed in numerous application
domains. In this article, we identify the common
concepts underlying qualitative spatial knowledge
representation, we compare the representational
properties of the different systems, and we outline the
computational tasks involved in relation-based spatial
information processing. We also describe {\em symbolic
spatial indexes}, relation-based structures that
combine several ideas in spatial knowledge
representation. A symbolic spatial index is an array
that preserves only a set of spatial relations among
distinct objects in an image, called the modeling
space; the index array discards information, such as
shape and size of objects, and irrelevant spatial
relations. The construction of a symbolic spatial index
from an input image can be thought of as a
transformation that keeps only a set of representative
points needed to define the relations of the modeling
space. By keeping the relative arrangements of the
representative points in symbolic spatial indexes and
discarding all other points, we maintain enough
information to answer queries regarding the spatial
relations of the modeling space without the need to
access the initial image or an object database.
Symbolic spatial indexes can be used to solve problems
involving route planning, composition of spatial
relations, and update operations.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "qualitative spatial information processing;
representation of direction and topological relations;
spatial data models; spatial query languages",
}
@Article{Lin:1994:TTI,
author = "King Ip Lin and H. V. Jagadish and Christos
Faloutsos",
title = "The {TV}-Tree: An Index Structure for High-Dimensional
Data",
journal = j-VLDB-J,
volume = "3",
number = "4",
pages = "517--542",
month = oct,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:31 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb3.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Faloutsos:Christos.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/j/Jagadish:H=_V=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lin:King=Ip.html",
abstract = "We propose a file structure to index
high-dimensionality data, which are typically points in
some feature space. The idea is to use only a few of
the features, using additional features only when the
additional discriminatory power is absolutely
necessary. We present in detail the design of our tree
structure and the associated algorithms that handle
such `varying length' feature vectors. Finally, we
report simulation results, comparing the proposed
structure with the $R*$-tree, which is one of the most
successful methods for low-dimensionality spaces. The
results illustrate the superiority of our method, which
saves up to 80\% in disk accesses.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "query by content; similarity retrieval; spatial
index",
}
@Article{Anonymous:1994:SIS,
author = "Anonymous",
title = "Special issue on spatial database systems",
journal = j-VLDB-J,
volume = "3",
number = "4",
pages = "??--??",
month = oct,
year = "1994",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:31 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Constantopoulos:1995:SIB,
author = "Panos Constantopoulos and Matthias Jarke and John
Mylopoulos and Yannis Vassiliou",
title = "The Software Information Base: a Server for Reuse",
journal = j-VLDB-J,
volume = "4",
number = "1",
pages = "1--43",
month = jan,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:32 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Constantopoulos:Panos.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/j/Jarke:Matthias.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mylopoulos:John.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/v/Vassiliou:Yannis.html",
abstract = "We present an experimental software repository system
that provides organization, storage, management, and
access facilities for reusable software components. The
system, intended as part of an applications development
environment, supports the representation of information
about requirements, designs and implementations of
software, and offers facilities for visual presentation
of the software objects. This article details the
features and architecture of the repository system, the
technical challenges and the choices made for the
system development along with a usage scenario that
illustrates its functionality. The system has been
developed and evaluated within the context of the
ITHACA project, a technology integration/software
engineering project sponsored by the European
Communities through the ESPRIT program, aimed at
developing an integrated reuse-centered application
development and support environment based on
object-oriented techniques.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "conceptual modeling; information storage and
retrieval; object-oriented databases; reuse; software
engineering",
}
@Article{Clifton:1995:HDQ,
author = "Chris Clifton and Hector Garcia-Molina and David
Bloom",
title = "{HyperFile}: a Data and Query Model for Documents",
journal = j-VLDB-J,
volume = "4",
number = "1",
pages = "45--86",
month = jan,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:32 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Bloom:David.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Clifton:Chris.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Garcia=Molina:Hector.html",
abstract = "Non-quantitative information such as documents and
pictures pose interesting new problems in the database
world. Traditional data models and query languages do
not provide appropriate support for this information.
Such data are typically stored in file systems, which
do not provide the security, integrity, or query
features of database management systems. The hypertext
model has emerged as a good interface to this
information; however, {\em finding\/} information using
hypertext browsing does not scale well. We developed a
query interface that serves as an extension of the
browsing model of hypertext systems. These queries
minimize the repeated user interactions required to
locate data in a standard hypertext system. HyperFile
is a prototype data server interface. In this article,
we describe HyperFile, including a number of issues
such as query generation, query processing, and
indexing.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "hypertext; indexing; user interface",
}
@Article{Agrawal:1995:OSL,
author = "Divyakant Agrawal and Amr {El Abbadi} and Richard
Jeffers and Lijing Lin",
title = "Ordered Shared Locks for Real-Time Databases",
journal = j-VLDB-J,
volume = "4",
number = "1",
pages = "87--126",
month = jan,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:32 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Abbadi:Amr_El.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Agrawal:Divyakant.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/j/Jeffers:Richard.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lin:Lijing.html",
abstract = "We propose locking protocols for real-time databases.
Our approach has two main motivations: First, locking
protocols are widely accepted and used in most database
systems. Second, in real-time databases it has been
shown that the blocking behavior of transactions in
locking protocols results in performance degradation.
We use a new relationship between locks called ordered
sharing to eliminate blocking that arises in the
traditional locking protocols. Ordered sharing
eliminates blocking of read and write operations but
may result in delayed termination. Since timeliness and
not response time is the crucial factor in real-time
databases, our protocols exploit this delay to allow
transactions to execute within the slacks of delayed
transactions. We compare the performance of the
proposed protocols with the two-phase locking protocol
for real-time databases. Our experiments indicate that
the proposed protocols significantly reduce the
percentage of missed deadlines in the system for a
variety of workloads.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; time-critical scheduling;
transaction management",
}
@Article{Dan:1995:CDA,
author = "Asit Dan and Philip S. Yu and Jen Yao Chung",
title = "Characterization of Database Access Pattern for
Analytic Prediction of Buffer Hit Probability",
journal = j-VLDB-J,
volume = "4",
number = "1",
pages = "127--154",
month = jan,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:32 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chung:Jen=Yao.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Dan:Asit.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Yu:Philip_S=.html",
abstract = "The analytic prediction of buffer hit probability,
based on the characterization of database accesses from
real reference traces, is extremely useful for workload
management and system capacity planning. The knowledge
can be helpful for proper allocation of buffer space to
various database relations, as well as for the
management of buffer space for a mixed transaction and
query environment. Access characterization can also be
used to predict the buffer invalidation effect in a
multi-node environment which, in turn, can influence
transaction routing strategies. However, it is a
challenge to characterize the database access pattern
of a real workload reference trace in a simple manner
that can easily be used to compute buffer hit
probability. In this article, we use a characterization
method that distinguishes three types of access
patterns from a trace: (1) locality within a
transaction, (2) random accesses by transactions, and
(3) sequential accesses by long queries. We then
propose a concise way to characterize the access skew
across randomly accessed pages by logically grouping
the large number of data pages into a small number of
partitions such that the frequency of accessing each
page within a partition can be treated as equal. Based
on this approach, we present a recursive binary
partitioning algorithm that can infer the access skew
characterization from the buffer hit probabilities for
a subset of the buffer sizes. We validate the buffer
hit predictions for single and multiple node systems
using production database traces. We further show that
the proposed approach can predict the buffer hit
probability of a composite workload from those of its
component files.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access skew; analytic prediction; database access
characterization; reference trace; sequential access;
workload management",
}
@Article{Peckham:1995:DME,
author = "Joan Peckham and Bonnie MacKellar and Michael
Doherty",
title = "Data Model for Extensible Support of Explicit
Relationships in Design Databases",
journal = j-VLDB-J,
volume = "4",
number = "2",
pages = "157--191",
month = apr,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:33 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Doherty:Michael.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/MacKellar:Bonnie.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Peckham:Joan.html",
abstract = "We describe the conceptual model of SORAC, a data
modeling system developed at the University of Rhode
Island. SORAC supports both semantic objects and
relationships, and provides a tool for modeling
databases needed for complex design domains. SORAC's
set of built-in semantic relationships permits the
schema designer to specify enforcement rules that
maintain constraints on the object and relationship
types. SORAC then automatically generates C++ code to
maintain the specified enforcement rules, producing a
schema that is compatible with Ontos. This facilitates
the task of the schema designer, who no longer has to
ensure that all methods on object classes correctly
maintain necessary constraints. In addition, explicit
specification of enforcement rules permits automated
analysis of enforcement propagations. We compare the
interpretations of relationships within the semantic
and object-oriented models as an introduction to the
mixed model that SORAC supports. Next, the set of
built-in SORAC relationship types is presented in terms
of the enforcement rules permitted on each relationship
type. We then use the modeling requirements of an
architectural design support system, called
ArchObjects, to demonstrate the capabilities of SORAC.
The implementation of the current SORAC prototype is
also briefly discussed.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "computer-aided architectural design; database
constraints; relationship semantics; semantic and
object-oriented data modeling",
xxpages = "157--192",
}
@Article{Teniente:1995:UKB,
author = "Ernest Teniente and Antoni Oliv{\'e}",
title = "Updating Knowledge Bases While Maintaining Their
Consistency",
journal = j-VLDB-J,
volume = "4",
number = "2",
pages = "193--241",
month = apr,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:33 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/o/Oliv=eacute=:Antoni.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Teniente:Ernest.html",
abstract = "When updating a knowledge base, several problems may
arise. One of the most important problems is that of
integrity constraints satisfaction. The classic
approach to this problem has been to develop methods
for {\em checking\/} whether a given update violates an
integrity constraint. An alternative approach consists
of trying to repair integrity constraints violations by
performing additional updates that {\em maintain\/}
knowledge base consistency. Another major problem in
knowledge base updating is that of {\em view updating},
which determines how an update request should be
translated into an update of the underlying base facts.
We propose a new method for updating knowledge bases
while maintaining their consistency. Our method can be
used for both integrity constraints maintenance and
view updating. It can also be combined with any
integrity checking method for view updating and
integrity checking. The kind of updates handled by our
method are: updates of base facts, view updates,
updates of deductive rules, and updates of integrity
constraints. Our method is based on events and
transition rules, which explicitly define the
insertions and deletions induced by a knowledge base
update. Using these rules, an extension of the SLDNF
procedure allows us to obtain all possible minimal ways
of updating a knowledge base without violating any
integrity constraint.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "integrity checking; integrity maintenance; view
updating",
}
@Article{Guting:1995:RBS,
author = "Ralf Hartmut G{\"u}ting and Markus Schneider",
title = "Realm-Based Spatial Data Types: The {ROSE} Algebra",
journal = j-VLDB-J,
volume = "4",
number = "2",
pages = "243--286",
month = apr,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:33 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/G=uuml=ting:Ralf_Hartmut.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Schneider:Markus.html",
abstract = "Spatial data types or algebras for database systems
should (1) be fully general, that is, closed under set
operations, (2) have formally defined semantics, (3) be
defined in terms of finite representations available in
computers, (4) offer facilities to enforce geometric
consistency of related spatial objects, and (5) be
independent of a particular DBMS data model, but
cooperate with any. We present an algebra that uses
{\em realms\/} as geometric domains underlying spatial
data types. A realm, as a general database concept, is
a finite, dynamic, user-defined structure underlying
one or more system data types. Problems of numerical
robustness and topological correctness are solved
within and below the realm layer so that spatial
algebras defined above a realm have very nice algebraic
properties. Realms also interact with a DMBS to enforce
geometric consistency on object creation or update. The
ROSE algebra is defined on top of realms and offers
general types to represent point, line, and region
features, together with a comprehensive set of
operations. It is described within a polymorphic type
system and interacts with a DMBS data model and query
language through an abstract {\em object model
interface.} An example integration of ROSE into the
object-oriented data model $O^2$ and its query language
is presented.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "finite resolution; geometric consistency; numerical
robustness; object model interface; realm; topological
correctness",
}
@Article{Templeton:1995:IDC,
author = "Marjorie Templeton and Herbert Henley and Edward Maros
and Darrel J. {Van Buer}",
title = "{InterViso}: Dealing With the Complexity of Federated
Database Access",
journal = j-VLDB-J,
volume = "4",
number = "2",
pages = "287--317",
month = apr,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:33 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Buer:Darrel_J=_Van.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Henley:Herbert.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Maros:Edward.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Templeton:Marjorie.html",
abstract = "Connectivity products are finally available to provide
the `highways' between computers containing data. IBM
has provided strong validation of the concept with
their `Information Warehouse.' DBMS vendors are
providing gateways into their products, and SQL is
being retrofitted on many older DBMSs to make it easier
to access data from standard 4GL products and
application development systems. The next step needed
for data integration is to provide (1) a common data
dictionary with a conceptual schema across the data to
mask the many differences that occur when databases are
developed independently and (2) a server that can
access and integrate the databases using information
from the data dictionary. In this article, we discuss
InterViso, one of the first commercial federated
database products. InterViso is based on Mermaid, which
was developed at SDC and Unisys (Templeton et al.,
1987b). It provides a value added layer above
connectivity products to handle views across databases,
schema translation, and transaction management.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data warehouse; database integration; federated
database",
xxpages = "287--318",
}
@Article{Atkinson:1995:SIP,
author = "Malcolm P. Atkinson and Ronald Morrison",
title = "Special Issue on Persistent Object Systems:
Orthogonally Persistent Object Systems",
journal = j-VLDB-J,
volume = "4",
number = "3",
pages = "319--401",
month = jul,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:01 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Atkinson:Malcolm_P=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Morrison:Ronald.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Atkinson:1995:OPO,
author = "Malcolm Atkinson and Ronald Morrison",
title = "Orthogonally persistent object systems",
journal = j-VLDB-J,
volume = "4",
number = "3",
pages = "319--402",
month = jul,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:34 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Persistent Application Systems (PASs) are of
increasing social and economic importance. They have
the potential to be long-lived, concurrently accessed,
and consist of large bodies of data and programs.
Typical examples of PASs are CAD/CAM systems, office
automation, CASE tools, software engineering
environments, and patient-care support systems in
hospitals. Orthogonally persistent object systems are
intended to provide improved support for the design,
construction, maintenance, and operation of PASs.
Persistence abstraction allows the creation and
manipulation of data in a manner that is independent of
its lifetime, thereby integrating the database view of
information with the programming language view. This
yields a number of advantages in terms of orthogonal
design and programmer productivity which are beneficial
for PASs. Design principles have been proposed for
persistent systems. By following these principles,
languages that provide persistence as a basic
abstraction have been developed. In this paper, the
motivation for orthogonal persistence is reviewed along
with the above mentioned design principles. The
concepts for integrating programming languages and
databases through the persistence abstraction, and
their benefits, are given. The technology to support
persistence, the achievements, and future directions of
persistence research are then discussed.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database programming languages; orthogonal
persistence; persistent application systems; persistent
programming languages",
}
@Article{Albano:1995:FPL,
author = "Antonio Albano and Giorgio Ghelli and Renzo Orsini",
title = "{Fibonacci}: a Programming Language for Object
Databases",
journal = j-VLDB-J,
volume = "4",
number = "3",
pages = "403--444",
month = jul,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:34 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Albano:Antonio.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Ghelli:Giorgio.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/o/Orsini:Renzo.html",
abstract = "Fibonacci is an object-oriented database programming
language characterized by static and strong typing, and
by new mechanisms for modeling databases in terms of
objects with roles, classes, and associations. A brief
introduction to the language is provided to present
those features, which are particularly suited to
modeling complex databases. Examples of the use of
Fibonacci are given with reference to the prototype
implementation of the language.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data models; database programming languages; objects
with roles",
}
@Article{Ozsu:1995:TUB,
author = "M. Tamer {\"O}zsu and Randal J. Peters and Duane
Szafron and Boman Irani and Anna Lipka and Adriana
Mu{\~n}oz",
title = "{TIGUKAT}: a Uniform Behavioral Objectbase
Management System",
journal = j-VLDB-J,
volume = "4",
number = "3",
pages = "445--492",
month = jul,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:34 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/=/=Ouml=zsu:M=_Tamer.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/i/Irani:Boman.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lipka:Anna.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mu=ntilde=oz:Adriana.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Peters:Randal_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Szafron:Duane.html",
abstract = "We describe the TIGUKAT objectbase management system,
which is under development at the Laboratory for
Database Systems Research at the University of Alberta.
TIGUKAT has a novel object model, whose identifying
characteristics include a purely behavioral semantics
and a uniform approach to objects. Everything in the
system, including types, classes, collections,
behaviors, and functions, as well as meta-information,
is a first-class object with well-defined behavior. In
this way, the model abstracts everything, including
traditional structural notions such as instance
variables, method implementation, and schema
definition, into a uniform semantics of behaviors on
objects. Our emphasis in this article is on the object
model, its implementation, the persistence model, and
the query language. We also (briefly) present other
database management functions that are under
development such as the query optimizer, the version
control system, and the transaction manager.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database management; objectbase management; persistent
storage system; reflective system",
}
@Article{Benzaken:1995:TDP,
author = "V{\'e}ronique Benzaken and Anne Doucet",
title = "{Th{\'e}mis}: a Database Programming Language
Handling Integrity Constraints",
journal = j-VLDB-J,
volume = "4",
number = "3",
pages = "493--517",
month = jul,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:34 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Benzaken:V=eacute=ronique.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Doucet:Anne.html",
abstract = "This article presents a database programming language,
Th{\'e}mis, which supports subtyping and class
hierarchies, and allows for the definition of integrity
constraints in a global and declarative way. We first
describe the salient features of the language: types,
names, classes, integrity constraints (including
methods), and transactions. The inclusion of methods
into integrity constraints allows an increase of the
declarative power of these constraints. Indeed, the
information needed to define a constraint is not always
stored in the database through attributes, but is
sometimes computed or derived data. Then, we address
the problem of efficiently checking constraints. More
specifically, we consider two different problems: (1)
statically reducing the number of constraints to be
checked, and (2) generating an efficient run-time
checker. Using simple strategies, one can significantly
improve the efficiency of the verification. We show how
to reduce the number of constraints to be checked by
characterizing the portions of the database that are
involved in both the constraints and in a transaction.
We also show how to generate efficient algorithms for
checking a large class of constraints. We show how all
the techniques presented exploit the underlying type
system, which provides significant help in solving (1)
and \1. Last, the current status of the Th{\'e}mis
prototype is presented.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database programming language; integrity constraints;
program analysis",
}
@Article{Kemper:1995:APS,
author = "Alfons Kemper and Donald Kossmann",
title = "Adaptable Pointer Swizzling Strategies in Object
Bases: Design, Realization, and Quantitative Analysis",
journal = j-VLDB-J,
volume = "4",
number = "3",
pages = "519--566",
month = jul,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:34 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kemper:Alfons.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kossmann:Donald.html",
abstract = "In this article, different techniques for {\em
`pointer swizzling'\/} are classified and evaluated for
optimizing the access to main-memory resident
persistent objects. To speed up the access along
inter-object references, the persistent pointers in the
form of unique object identifiers (OIDs) are
transformed (swizzled) into main-memory pointers
(addresses). Pointer swizzling techniques can be
divided into two classes: (1) those that allow
replacement of swizzled objects from the buffer before
the end of an application program, and (2) those that
rule out the displacement of swizzled objects. The
first class (i.e., techniques that take `precautions'
for the replacement of swizzled objects) has not yet
been thoroughly investigated. Four different pointer
swizzling techniques allowing object replacement are
investigated and compared with the performance of an
object manager employing no pointer swizzling. The
extensive qualitative and quantitative
evaluation---only part of which could be presented in
this article---demonstrate that there is no {\em one\/}
superior pointer swizzling strategy for {\em all\/}
application profiles. Therefore, an adaptable object
base run-time system is devised that employs the full
range of pointer swizzling strategies, depending on the
application profile characteristics that are determined
by, for example, monitoring in combination with
sampling, user specifications, and/or program
analysis.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "object-oriented database systems; performance
evaluation; pointer swizzling",
xxpages = "519--567",
}
@Article{Anonymous:1995:SIP,
author = "Anonymous",
title = "Special issue on persistent object systems",
journal = j-VLDB-J,
volume = "4",
number = "3",
pages = "??--??",
month = jul,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:34 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Barbara:1995:SSO,
author = "Daniel Barbar{\'a} and Tomasz Imielinski",
title = "Special System-oriented Section: The Best of {SIGMOD}
1994: Sleepers and Workaholics: Caching Strategies in
Mobile Environments",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "567--602",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:01 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Barbar=aacute=:Daniel.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/i/Imielinski:Tomasz.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Barbara:1995:SWC,
author = "Daniel Barbar{\'a} and Tomasz Imieli{\'n}ski",
title = "Sleepers and workaholics: caching strategies in mobile
environments (extended version)",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "567--602",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:35 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In the mobile wireless computing environment of the
future, a large number of users, equipped with
low-powered palmtop machines, will query databases over
wireless communication channels. Palmtop-based units
will often be disconnected for prolonged periods of
time, due to battery power saving measures; palmtops
also will frequently relocate between different cells,
and will connect to different data servers at different
times. Caching of frequently accessed data items will
be an important technique that will reduce contention
on the narrow-bandwidth, wireless channel. However,
cache individualization strategies will be severely
affected by the disconnection and mobility of the
clients. The server may no longer know which clients
are currently residing under its cell, and which of
them are currently on. We propose a taxonomy of
different cache invalidation strategies, and study the
impact of clients' disconnection times on their
performance. We study ways to improve further the
efficiency of the invalidation techniques described. We
also describe how our techniques can be implemented
over different network environments.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "caching; data management; information services;
wireless",
}
@Article{Nyberg:1995:ACS,
author = "Chris Nyberg and Tom Barclay and Zarka Cvetanovic and
Jim Gray and David B. Lomet",
title = "{AlphaSort}: a Cache-Sensitive Parallel External
Sort",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "603--627",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:35 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Barclay:Tom.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Cvetanovic:Zarka.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Gray:Jim.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lomet:David_B=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Nyberg:Chris.html",
abstract = "A new sort algorithm, called AlphaSort, demonstrates
that commodity processors and disks can handle
commercial batch workloads. Using commodity processors,
memory, and arrays of SCSI disks, AlphaSort runs the
industry-standard sort benchmark in seven seconds. This
beats the best published record on a 32-CPU 32-disk
Hypercube by 8:1. On another benchmark, AlphaSort
sorted more than a gigabyte in one minute. AlphaSort is
a cache-sensitive, memory-intensive sort algorithm. We
argue that modern architectures require algorithm
designers to re-examine their use of the memory
hierarchy. AlphaSort uses clustered data structures to
get good cache locality, file striping to get high disk
bandwidth, QuickSort to generate runs, and
replacement-selection to merge the runs. It uses shared
memory multiprocessors to break the sort into subsort
chores. Because startup times are becoming a
significant part of the total time, we propose two new
benchmarks: (1) MinuteSort: how much can you sort in
one minute, and (2) PennySort: how much can you sort
for one penny.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "Alpha; cache; DEC 7000; disk; memory; parallel; sort;
striping",
xxpages = "603--628",
}
@Article{White:1995:QHP,
author = "Seth J. White and David J. DeWitt",
title = "{QuickStore}: a High Performance Mapped Object
Store",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "629--673",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:35 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/DeWitt:David_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/White:Seth_J=.html",
abstract = "QuickStore is a memory-mapped storage system for
persistent C++, built on top of the EXODUS Storage
Manager. QuickStore provides fast access to in-memory
objects by allowing application programs to access
objects via normal virtual memory pointers. This
article presents the results of a detailed performance
study using the OO7 benchmark. The study compares the
performance of QuickStore with the latest
implementation of the E programming language. The
QuickStore and E systems exemplify the two basic
approaches (hardware and software) that have been used
to implement persistence in object-oriented database
systems. In addition, both systems use the same
underlying storage manager and compiler, allowing us to
make a truly apples-to-apples comparison of the
hardware and software techniques.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "benchmark; client-server; memory-mapped;
object-oriented; performance; pointer swizzling",
}
@Article{Swami:1995:EPF,
author = "Arun N. Swami and K. Bernhard Schiefer",
title = "Estimating Page Fetches for Index Scans with Finite
{LRU} Buffers",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "675--701",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:35 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Schiefer:K=_Bernhard.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Swami:Arun_N=.html",
abstract = "We describe an algorithm for estimating the number of
page fetches for a partial or complete scan of a B-tree
index. The algorithm obtains estimates for the number
of page fetches for an index scan when given the number
of tuples selected and the number of LRU buffers
currently available. The algorithm has an initial phase
that is performed exactly once before any estimates are
calculated. This initial phase, involving LRU buffer
modeling, requires a scan of all the index entries and
calculates the number of page fetches for different
buffer sizes. An approximate empirical model is
obtained from this data. Subsequently, an inexpensive
estimation procedure is called by the query optimizer
whenever it needs an estimate of the page fetches for
the index scan. This procedure utilizes the empirical
model obtained in the initial phase.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "estimation; index scan; LRU; query optimization",
}
@Article{Landau:1995:HQA,
author = "Gad M. Landau and Jeanette P. Schmidt and Vassilis J.
Tsotras",
title = "Historical queries along multiple lines of time
evolution",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "703--726",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:35 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Traditional approaches to addressing historical
queries assume a {\em single\/} line of time evolution;
that is, a system (database, relation) evolves over
time through a sequence of transactions. Each
transaction always applies to the unique, current state
of the system, resulting in a new current state. There
are, however, complex applications where the system's
state evolves into {\em multiple\/} lines of evolution.
In general, this creates a tree (hierarchy) of
evolution lines, where each tree node represents the
time evolution of a particular subsystem. Multiple
lines create novel historical queries, such as {\em
vertical\/} or {\em horizontal\/} historical queries.
The key characteristic of these problems is that
portions of the history are shared; answering
historical queries should not necessitate duplication
of shared histories as this could increase the storage
requirements dramatically. Both the vertical and
horizontal historical queries have two parts: a
`search' part, where the time of interest is located
together with the appropriate subsystem, and a
reconstruction part, where the subsystem's state is
reconstructed for that time. This article focuses on
the search part; several reconstruction methods,
designed for single evolution lines can be applied once
the appropriate time of interest is located. For both
the vertical and the horizontal historical queries, we
present algorithms that work without duplicating shared
histories. Combinations of the vertical and horizontal
queries are possible, and enable searching in both
dimensions of the tree of evolutions.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access methods; CAD databases; data-structures;
rollback databases",
}
@Article{Landau:1995:RJA,
author = "Gad M. Landau and Jeanette P. Schmidt and Vassilis J.
Tsotras",
title = "Regular Journal Articles: Historical Queries Along
Multiple Lines of Time Evolution",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "703--726",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:01 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Landau:Gad_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Schmidt:Jeanette_P=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tsotras:Vassilis_J=.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Abiteboul:1995:PLM,
author = "Serge Abiteboul and Catriel Beeri",
title = "The Power of Languages for the Manipulation of Complex
Values",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "727--794",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:35 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb4.html;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Abiteboul:Serge.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Beeri:Catriel.html",
abstract = "Various models and languages for describing and
manipulating hierarchically structured data have been
proposed. Algebraic, calculus-based, and
logic-programming oriented languages have all been
considered. This article presents a general model for
complex values (i.e., values with hierarchical
structures), and languages for it based on the three
paradigms. The algebraic language generalizes those
presented in the literature; it is shown to be related
to the functional style of programming advocated by
Backus (1978). The notion of domain independence (from
relational databases) is defined, and syntactic
restrictions (referred to as safety conditions) on
calculus queries are formulated to guarantee domain
independence. The main results are: The
domain-independent calculus, the safe calculus, the
algebra, and the logic-programming oriented language
have equivalent expressive power. In particular,
recursive queries, such as the transitive closure, can
be expressed in each of the languages. For this result,
the algebra needs the powerset operation. A more
restricted version of safety is presented, such that
the restricted safe calculus is equivalent to the
algebra without the powerset. The results are extended
to the case where arbitrary functions and predicates
are used in the languages.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "complex object; complex value; database; database
model; query language",
}
@Article{Anonymous:1995:SSO,
author = "Anonymous",
title = "Special system-oriented section: the best of {SIGMOD}
`94",
journal = j-VLDB-J,
volume = "4",
number = "4",
pages = "??--??",
month = oct,
year = "1995",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:35 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{DeWitt:1996:POT,
author = "David J. {De Witt} and Jeffrey F. Naughton and John C.
Shafer and Shivakumar Venkataraman",
title = "Parallelizing {OODBMS} traversals: a performance
evaluation",
journal = j-VLDB-J,
volume = "5",
number = "1",
pages = "3--18",
month = jan,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:36 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/DeWitt:David_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Naughton:Jeffrey_F=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Shafer:John_C=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/v/Venkataraman:Shivakumar.html;
http://link.springer.de/link/service/journals/00778/bibs/6005001/60050003.htm;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050003.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050003.ps.gz",
abstract = "In this paper we describe the design and
implementation of {\em ParSets}, a means of exploiting
parallelism in the SHORE OODBMS. We used ParSets to
parallelize the graph traversal portion of the OO7
OODBMS benchmark, and present speedup and scaleup
results from parallel SHORE running these traversals on
a cluster of commodity workstations connected by a
standard Ethernet. For some OO7 traversals, SHORE
achieved excellent speedup and scaleup; for other OO7
traversals, only marginal speedup and scaleup occurred.
The characteristics of these traversals shed light on
when the ParSet approach to parallelism can and cannot
be applied to speed up an application.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "Object-oriented database management systems;
Parallelism; ParSets; SHORE",
}
@Article{Sivasankaran:1996:PAR,
author = "Rajendran M. Sivasankaran and John A. Stankovic and
Donald F. Towsley and Bhaskar Purimetla and Krithi
Ramamritham",
title = "Priority Assignment in Real-Time Active Databases",
journal = j-VLDB-J,
volume = "5",
number = "1",
pages = "19--34",
month = jan,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:36 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Purimetla:Bhaskar.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ramamritham:Krithi.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sivasankaran:Rajendran_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Stankovic:John_A=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Towsley:Donald_F=.html;
http://link.springer.de/link/service/journals/00778/bibs/6005001/60050019.htm;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050019.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050019.ps.gz",
abstract = "Active databases and real-time databases have been
important areas of research in the recent past. It has
been recognized that many benefits can be gained by
integrating real-time and active database technologies.
However, not much work has been done in the area of
transaction processing in real-time active databases.
This paper deals with an important aspect of
transaction processing in real-time active databases,
namely the problem of assigning priorities to
transactions. In these systems, time-constrained
transactions trigger other transactions during their
execution. We present three policies for assigning
priorities to parent, immediate and deferred
transactions executing on a multiprocessor system and
then evaluate the policies through simulation. The
policies use different amounts of semantic information
about transactions to assign the priorities. The
simulator has been validated against the results of
earlier published studies. We conducted experiments in
three settings: a task setting, a main memory database
setting and a disk-resident database setting. Our
results demonstrate that dynamically changing the
priorities of transactions, depending on their behavior
(triggering rules), yields a substantial improvement in
the number of triggering transactions that meet their
deadline in all three settings.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "Active databases; Coupling mode; Deadlines;
ECA-priority assignment; Real-time databases",
}
@Article{Keller:1996:PBC,
author = "Arthur M. Keller and Julie Basu",
title = "A Predicate-based Caching Scheme for Client-Server
Database Architectures",
journal = j-VLDB-J,
volume = "5",
number = "1",
pages = "35--47",
month = jan,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:36 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Basu:Julie.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Keller:Arthur_M=.html;
http://link.springer.de/link/service/journals/00778/bibs/6005001/60050035.htm;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050035.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050035.ps.gz",
abstract = "We propose a new client-side data-caching scheme for
relational databases with a central server and multiple
clients. Data are loaded into each client cache based
on queries executed on the central database at the
server. These queries are used to form predicates that
describe the cache contents. A subsequent query at the
client may be satisfied in its local cache if we can
determine that the query result is entirely contained
in the cache. This issue is called {\em cache
completeness}. A separate issue, {\em cache currency},
deals with the effect on client caches of updates
committed at the central database. We examine the
various performance tradeoffs and optimization issues
involved in addressing the questions of cache currency
and completeness using predicate descriptions and
suggest solutions that promote good dynamic behavior.
Lower query-response times, reduced message traffic,
higher server throughput, and better scalability are
some of the expected benefits of our approach over
commonly used relational server-side and object
ID-based or page-based client-side caching.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "cache completeness; cache currency; caching; multiple
clients; relational databases",
}
@Article{Stonebraker:1996:MWA,
author = "Michael Stonebraker and Paul M. Aoki and Witold Litwin
and Avi Pfeffer and Adam Sah and Jeff Sidell and Carl
Staelin and Andrew Yu",
title = "{Mariposa}: a Wide-Area Distributed Database
System",
journal = j-VLDB-J,
volume = "5",
number = "1",
pages = "48--63",
month = jan,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:36 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Aoki:Paul_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Litwin:Witold.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Pfeffer:Avi.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sah:Adam.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sidell:Jeff.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Staelin:Carl.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Stonebraker:Michael.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Yu:Andrew.html;
http://link.springer.de/link/service/journals/00778/bibs/6005001/60050048.htm;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050048.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050048.ps.gz",
abstract = "The requirements of wide-area distributed database
systems differ dramatically from those of local-area
network systems. In a wide-area network (WAN)
configuration, individual sites usually report to
different system administrators, have different access
and charging algorithms, install site-specific data
type extensions, and have different constraints on
servicing remote requests. Typical of the last point
are production transaction environments, which are
fully engaged during normal business hours, and cannot
take on additional load. Finally, there may be many
sites participating in a WAN distributed DBMS.In this
world, a single program performing global query
optimization using a cost-based optimizer will not work
well. Cost-based optimization does not respond well to
site-specific type extension, access constraints,
charging algorithms, and time-of-day constraints.
Furthermore, traditional cost-based distributed
optimizers do not scale well to a large number of
possible processing sites. Since traditional
distributed DBMSs have all used cost-based optimizers,
they are not appropriate in a WAN environment, and a
new architecture is required. We have proposed and
implemented an economic paradigm as the solution to
these issues in a new distributed DBMS called Mariposa.
In this paper, we present the architecture and
implementation of Mariposa and discuss early feedback
on its operating characteristics.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "autonomy; databases; distributed systems; economic
site; name service; wide-area network",
}
@Article{Harris:1996:JAC,
author = "Evan P. Harris and Kotagiri Ramamohanarao",
title = "Join Algorithm Costs Revisited",
journal = j-VLDB-J,
volume = "5",
number = "1",
pages = "64--84",
month = jan,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:36 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Harris:Evan_P=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ramamohanarao:Kotagiri.html;
http://link.springer.de/link/service/journals/00778/bibs/6005001/60050064.htm;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050064.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050064.ps.gz",
abstract = "A method of analysing join algorithms based upon the
time required to access, transfer and perform the
relevant CPU-based operations on a disk page is
proposed. The costs of variations of several of the
standard join algorithms, including nested block,
sort-merge, GRACE hash and hybrid hash, are presented.
For a given total buffer size, the cost of these join
algorithms depends on the parts of the buffer allocated
for each purpose. For example, when joining two
relations using the nested block join algorithm, the
amount of buffer space allocated for the outer and
inner relations can significantly affect the cost of
the join. Analysis of expected and experimental results
of various join algorithms show that a combination of
the optimal nested block and optimal GRACE hash join
algorithms usually provide the greatest cost benefit,
unless the relation size is a small multiple of the
memory size. Algorithms to quickly determine a buffer
allocation producing the minimal cost for each of these
algorithms are presented. When the relation size is a
small multiple of the amount of main memory available
(typically up to three to six times), the hybrid hash
join algorithm is preferable.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "join algorithms; minimisation; optimal buffer
allocation",
}
@Article{Ramamritham:1996:TCC,
author = "Krithi Ramamritham and Panos K. Chrysanthis",
title = "A taxonomy of correctness criteria in database
applications (*)",
journal = j-VLDB-J,
volume = "5",
number = "1",
pages = "85--97",
month = jan,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:36 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chrysanthis:Panos_K=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ramamritham:Krithi.html;
http://link.springer.de/link/service/journals/00778/bibs/6005001/60050085.htm;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050085.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005001/60050085.ps.gz",
abstract = "Whereas serializability captures {\em database
consistency requirements\/} and {\em transaction
correctness properties\/} via a single notion, recent
research has attempted to come up with correctness
criteria that view these two types of requirements
independently. The search for more flexible correctness
criteria is partly motivated by the introduction of new
transaction models that extend the traditional atomic
transaction model. These extensions came about because
the atomic transaction model in conjunction with
serializability is found to be very constraining when
used in advanced applications (e.g., design databases)
that function in distributed, cooperative, and
heterogeneous environments. In this article we develop
a taxonomy of various {\em correctness criteria\/} that
focus on database consistency requirements and
transaction correctness properties from the viewpoint
of {\em what\/} the different dimensions of these two
are. This taxonomy allows us to categorize correctness
criteria that have been proposed in the literature. To
help in this categorization, we have applied a uniform
specification technique, based on ACTA, to express the
various criteria. Such a categorization helps shed
light on the similarities and differences between
different criteria and places them in perspective.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; database correctness criteria;
formal specifications; transaction processing",
}
@Article{Tsatalos:1996:GVT,
author = "Odysseas G. Tsatalos and Marvin H. Solomon and Yannis
E. Ioannidis",
title = "The {GMAP}: a Versatile Tool for Physical Data
Independence",
journal = j-VLDB-J,
volume = "5",
number = "2",
pages = "101--118",
month = apr,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:38 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/i/Ioannidis:Yannis_E=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Solomon:Marvin_H=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tsatalos:Odysseas_G=.html;
http://link.springer.de/link/service/journals/00778/bibs/6005002/60050101.htm;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050101.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050101.ps.gz",
abstract = "Physical data independence is touted as a central
feature of modern database systems. It allows users to
frame queries in terms of the logical structure of the
data, letting a query processor automatically translate
them into optimal plans that access physical storage
structures. Both relational and object-oriented
systems, however, force users to frame their queries in
terms of a logical schema that is directly tied to
physical structures. We present an approach that
eliminates this dependence. All storage structures are
defined in a declarative language based on relational
algebra as functions of a logical schema. We present an
algorithm, integrated with a conventional query
optimizer, that translates queries over this logical
schema into plans that access the storage structures.
We also show how to compile update requests into plans
that update all relevant storage structures
consistently and optimally. Finally, we report on
experiments with a prototype implementation of our
approach that demonstrate how it allows storage
structures to be tuned to the expected or observed
workload to achieve significantly better performance
than is possible with conventional techniques.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "indexing; materialized views; physical data
independence; physical database design",
}
@Article{Poulovassilis:1996:AQO,
author = "Alexandra Poulovassilis and Carol Small",
title = "Algebraic Query Optimisation for Database Programming
Languages",
journal = j-VLDB-J,
volume = "5",
number = "2",
pages = "119--132",
month = apr,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:38 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Poulovassilis:Alexandra.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Small:Carol.html;
http://link.springer.de/link/service/journals/00778/bibs/6005002/60050119.htm;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050119.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050119.ps.gz",
abstract = "A major challenge still facing the designers and
implementors of database programming languages (DBPLs)
is that of query optimisation. We investigate algebraic
query optimisation techniques for DBPLs in the context
of a purely declarative functional language that
supports sets as first-class objects. Since the
language is computationally complete issues such as
non-termination of expressions and construction of
infinite data structures can be investigated, whilst
its declarative nature allows the issue of side effects
to be avoided and a richer set of equivalences to be
developed. The language has a well-defined semantics
which permits us to reason formally about the
properties of expressions, such as their equivalence
with other expressions and their termination. The
support of a set bulk data type enables much prior work
on the optimisation of relational languages to be
utilised. In the paper we first give the syntax of our
archetypal DBPL and briefly discuss its semantics. We
then define a small but powerful algebra of operators
over the set data type, provide some key equivalences
for expressions in these operators, and list
transformation principles for optimising expressions.
Along the way, we identify some caveats to well-known
equivalences for non-deductive database languages. We
next extend our language with two higher level
constructs commonly found in functional DBPLs: set
comprehensions and functions with known inverses. Some
key equivalences for these constructs are provided, as
are transformation principles for expressions in them.
Finally, we investigate extending our equivalences for
the set operators to the analogous operators over bags.
Although developed and formally proved in the context
of a functional language, our findings are directly
applicable to other DBPLs of similar expressiveness.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "algebraic manipulation; database management; database
programming languages; functional languages; query
optimisation",
}
@Article{Amiel:1996:TSR,
author = "Eric Amiel and Marie-Jo Bellosta and Eric Dujardin and
Eric Simon",
title = "Type-safe Relaxing of Schema Consistency Rules for
Flexible Modeling in {OODBMS}",
journal = j-VLDB-J,
volume = "5",
number = "2",
pages = "133--150",
month = apr,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:38 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Amiel:Eric.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Bellosta:Marie=Jo.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Dujardin:Eric.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Simon:Eric.html;
http://link.springer.de/link/service/journals/00778/bibs/6005002/60050133.htm;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050133.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050133.ps.gz",
abstract = "Object-oriented databases enforce behavioral schema
consistency rules to guarantee type safety, i.e., that
no run-time type error can occur. When the schema must
evolve, some schema updates may violate these rules. In
order to maintain behavioral schema consistency,
traditional solutions require significant changes to
the types, the type hierarchy and the code of existing
methods. Such operations are very expensive in a
database context. To ease schema evolution, we propose
to support exceptions to the behavioral consistency
rules without sacrificing type safety. The basic idea
is to detect unsafe statements in a method code at
compile-time and check them at run-time. The run-time
check is performed by a specific clause that is
automatically inserted around unsafe statements. This
check clause warns the programmer of the safety problem
and lets him provide exception-handling code. Schema
updates can therefore be performed with only minor
changes to the code of methods.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "contravariance; covariance; object-oriented databases;
schema evolution; type safety",
xxtitle = "Type-safe relaxing of schema consistency rules for
flexible modelling in {OODBMS}",
}
@Article{Fang:1996:EOB,
author = "Doug Fang and Shahram Ghandeharizadeh and Dennis
McLeod",
title = "An experimental object-based sharing system for
networked databases",
journal = j-VLDB-J,
volume = "5",
number = "2",
pages = "151--165",
month = apr,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:38 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Fang:Doug.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Ghandeharizadeh:Shahram.html;
http://link.springer.de/link/service/journals/00778/bibs/6005002/60050151.htm;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050151.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005002/60050151.ps.gz;
http://link.springer.de/link/service/journals/00778/tocs/mailto: HREF="mailto:helpdesk@link.springer.de">helpdesk@link.springer.de",
abstract = "An approach and mechanism for the transparent sharing
of objects in an environment of interconnected
(networked), autonomous database systems is presented.
An experimental prototype system has been designed and
implemented, and an analysis of its performance
conducted. Previous approaches to sharing in this
environment typically rely on the use of a global,
integrated conceptual database schema; users and
applications must pose queries at this new level of
abstraction to access remote information. By contrast,
our approach provides a mechanism that allows users to
import remote objects directly into their local
database transparently; access to remote objects is
virtually the same as access to local objects. The
experimental prototype system that has been designed
and implemented is based on the Iris and Omega
object-based database management systems; this system
supports the sharing of data and meta-data objects
(information units) as well as units of behavior. The
results of experiments conducted to evaluate the
performance of our mechanism demonstrate the
feasibility of database transparent object sharing in a
federated environment, and provide insight into the
performance overhead and tradeoffs involved.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database system interoperability; experimental
prototype benchmarking; object sharing",
xxtitle = "An Experimental System for Object-Based Sharing in
Federated Databases",
}
@Article{Dey:1996:CTR,
author = "Debabrata Dey and Terence M. Barron and Veda C.
Storey",
title = "A Complete Temporal Relational Algebra",
journal = j-VLDB-J,
volume = "5",
number = "3",
pages = "167--180",
month = aug,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Barron:Terence_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Dey:Debabrata.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Storey:Veda_C=.html;
http://link.springer.de/link/service/journals/00778/bibs/6005003/60050167.htm;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050167.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050167.ps.gz",
abstract = "Various temporal extensions to the relational model
have been proposed. All of these, however, deviate
significantly from the original relational model. This
paper presents a temporal extension of the relational
algebra that is not significantly different from the
original relational model, yet is at least as
expressive as any of the previous approaches. This
algebra employs multidimensional tuple time-stamping to
capture the complete temporal behavior of data. The
basic relational operations are redefined as consistent
extensions of the existing operations in a manner that
preserves the basic algebraic equivalences of the
snapshot (i.e., conventional static) algebra. A new
operation, namely {\em temporal projection}, is
introduced. The complete update semantics are formally
specified and aggregate functions are defined. The
algebra is closed, and reduces to the snapshot algebra.
It is also shown to be at least as expressive as the
calculus-based temporal query language TQuel. In order
to assess the algebra, it is evaluated using a set of
twenty-six criteria proposed in the literature, and
compared to existing temporal relational algebras. The
proposed algebra appears to satisfy more criteria than
any other existing algebra.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "historical databases; relational algebra; temporal
databases; transaction time; valid time",
remark = "Check month: July or August??",
}
@Article{Shyy:1996:DIK,
author = "Yuh-Ming Shyy and Javier Arroyo and Stanley Y. W. Su
and Herman Lam",
title = "The Design and Implementation of {K}: a High-Level
Knowledge-Base Programming Language of {OSAM*.KBMS}",
journal = j-VLDB-J,
volume = "5",
number = "3",
pages = "181--195",
month = aug,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Arroyo:Javier.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lam:Herman.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Shyy:Yuh=Ming.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Su:Stanley_Y=_W=.html;
http://link.springer.de/link/service/journals/00778/bibs/6005003/60050181.htm;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050181.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050181.ps.gz",
abstract = "The OSAM*.KBMS is a knowledge-base management system,
or the so-called next-generation database management
system, for non-traditional data/knowledge-intensive
applications. In order to define, query, and manipulate
a knowledge base, as well as to write codes to
implement any application system, we have developed an
object-oriented knowledge-base programming language
called K to serve as the high-level interface of
OSAM*.KBMS. This paper presents the design of K, its
implementation, and its supporting KBMS developed at
the Database Systems Research and Development Center of
the University of Florida.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "abstractions; association patterns; knowledge-base
programming language; object-oriented knowledge model;
structural associations",
remark = "Check month: July or August??",
}
@Article{Harder:1996:APS,
author = "Theo H{\"a}rder and Joachim Reinert",
title = "Access Path Support for Referential Integrity in
{SQL2}",
journal = j-VLDB-J,
volume = "5",
number = "3",
pages = "196--214",
month = aug,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/H=auml=rder:Theo.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Reinert:Joachim.html;
http://link.springer.de/link/service/journals/00778/bibs/6005003/60050196.htm;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050196.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050196.ps.gz",
abstract = "The relational model of data incorporates fundamental
assertions for entity integrity and referential
integrity. Recently, these so-called relational
invariants were more precisely specified by the new
SQL2 standard. Accordingly, they have to be guaranteed
by a relational DBMS to its users and, therefore, all
issues of semantics and implementation became very
important. The specification of referential integrity
embodies quite a number of complications including the
MATCH clause and a collection of referential actions.
In particular, $\hbox{{\tt MATCH PARTIAL}}$ turns out
to be hard to understand and, if applied, difficult and
expensive to maintain. In this paper, we identify the
functional requirements for preserving referential
integrity. At a level free of implementational
considerations, the number and kinds of searches
necessary for referential integrity maintenance are
derived. Based on these findings, our investigation is
focused on the question of how the functional
requirements can be supported by implementation
concepts in an efficient way. We determine the search
cost for referential integrity maintenance (in terms of
page references) for various possible access path
structures. Our main result is that a combined access
path structure is the most appropriate for checking the
regular MATCH option, whereas $\hbox{{\tt MATCH
PARTIAL}}$ requires very expensive and complicated
check procedures. If it cannot be avoided at all, the
best support is achieved by a combination of multiple
$\mbox{B}^*$-trees.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access path support; MATCH clause; referential
integrity; relational databases; SQL2",
remark = "Check month: July or August??",
}
@Article{Ooi:1996:INE,
author = "Beng Chin Ooi and Jiawei Han and Hongjun Lu and Kian
Lee Tan",
title = "Index Nesting --- An Efficient Approach to Indexing in
Object-Oriented Databases",
journal = j-VLDB-J,
volume = "5",
number = "3",
pages = "215--228",
month = aug,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Han:Jiawei.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lu:Hongjun.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/o/Ooi:Beng_Chin.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tan:Kian=Lee.html;
http://link.springer.de/link/service/journals/00778/bibs/6005003/60050215.htm;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050215.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005003/60050215.ps.gz;
http://link.springer.de/link/service/journals/00778/tocs/mailto: HREF="mailto:helpdesk@link.springer.de">helpdesk@link.springer.de",
abstract = "In object-oriented database systems where the concept
of the superclass-subclass is supported, an instance of
a subclass is also an instance of its superclass.
Consequently, the access scope of a query against a
class in general includes the access scope of all its
subclasses, unless specified otherwise. An index to
support superclass-subclass relationship efficiently
must provide efficient associative retrievals of
objects from a single class or from several classes in
a class hierarchy. This paper presents an efficient
index called the hierarchical tree (the H-tree). For
each class, an H-tree is maintained, allowing efficient
search on a single class. These H-trees are
appropriately linked to capture the superclass-subclass
relationships, thus allowing efficient retrievals of
instances from a class hierarchy. Both experimental and
analytical results indicate that the H-tree is an
efficient indexing structure.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "indexing structures; OODB; query retrieval",
remark = "Check month: July or August??",
}
@Article{Antoshenkov:1996:QPO,
author = "Gennady Antoshenkov and Mohamed Ziauddin",
title = "Query Processing and Optimization in {Oracle Rdb}",
journal = j-VLDB-J,
volume = "5",
number = "4",
pages = "229--237",
month = dec,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Antoshenkov:Gennady.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Ziauddin:Mohamed.html;
http://link.springer.de/link/service/journals/00778/bibs/6005004/60050229.htm;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050229.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050229.ps.gz",
abstract = "This paper contains an overview of the technology used
in the query processing and optimization component of
Oracle Rdb, a relational database management system
originally developed by Digital Equipment Corporation
and now under development by Oracle Corporation. Oracle
Rdb is a production system that supports the most
demanding database applications, runs on multiple
platforms and in a variety of environments.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "dynamic optimization; optimizer; query transformation;
relational database; sampling",
}
@Article{Mylopoulos:1996:BKB,
author = "John Mylopoulos and Vinay K. Chaudhri and Dimitris
Plexousakis and Adel Shrufi and Thodoros Topologlou",
title = "Building Knowledge Base Management Systems",
journal = j-VLDB-J,
volume = "5",
number = "4",
pages = "238--263",
month = dec,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chaudhri:Vinay_K=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mylopoulos:John.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Plexousakis:Dimitris.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Shrufi:Adel.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Topaloglou:Thodoros.html;
http://link.springer.de/link/service/journals/00778/bibs/6005004/60050238.htm;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050238.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050238.ps.gz",
abstract = "Advanced applications in fields such as CAD, software
engineering, real-time process control, corporate
repositories and digital libraries require the
construction, efficient access and management of large,
shared knowledge bases. Such knowledge bases cannot be
built using existing tools such as expert system
shells, because these do not scale up, nor can they be
built in terms of existing database technology, because
such technology does not support the rich
representational structure and inference mechanisms
required for knowledge-based systems. This paper
proposes a generic architecture for a knowledge base
management system intended for such applications. The
architecture assumes an object-oriented knowledge
representation language with an assertional sublanguage
used to express constraints and rules. It also provides
for general-purpose deductive inference and
special-purpose temporal reasoning. Results reported in
the paper address several knowledge base management
issues. For storage management, a new method is
proposed for generating a logical schema for a given
knowledge base. Query processing algorithms are offered
for semantic and physical query optimization, along
with an enhanced cost model for query cost estimation.
On concurrency control, the paper describes a novel
concurrency control policy which takes advantage of
knowledge base structure and is shown to outperform
two-phase locking for highly structured knowledge bases
and update-intensive transactions. Finally, algorithms
for compilation and efficient processing of constraints
and rules during knowledge base operations are
described. The paper describes original results,
including novel data structures and algorithms, as well
as preliminary performance evaluation data. Based on
these results, we conclude that knowledge base
management systems which can accommodate large
knowledge bases are feasible.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; constraint enforcement; knowledge
base management systems; rule management; storage
management",
}
@Article{Becker:1996:AOM,
author = "Bruno Becker and Stephan Gschwind and Thomas Ohler and
Bernhard Seeger and Peter Widmayer",
title = "An Asymptotically Optimal Multiversion {B}-Tree",
journal = j-VLDB-J,
volume = "5",
number = "4",
pages = "264--275",
month = dec,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Becker:Bruno.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Gschwind:Stephan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/o/Ohler:Thomas.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Seeger:Bernhard.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Widmayer:Peter.html;
http://link.springer.de/link/service/journals/00778/bibs/6005004/60050264.htm;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050264.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050264.ps.gz",
abstract = "In a variety of applications, we need to keep track of
the development of a data set over time. For
maintaining and querying these multiversion data
efficiently, external storage structures are an
absolute necessity. We propose a multiversion B-tree
that supports insertions and deletions of data items at
the current version and range queries and exact match
queries for any version, current or past. Our
multiversion B-tree is asymptotically optimal in the
sense that the time and space bounds are asymptotically
the same as those of the (single-version) B-tree in the
worst case. The technique we present for transforming a
(single-version) B-tree into a multiversion B-tree is
quite general: it applies to a number of hierarchical
external access structures with certain properties
directly, and it can be modified for others.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access methods; information systems; physical design;
versioned data",
}
@Article{Kashyap:1996:SSS,
author = "Vipul Kashyap and Amit P. Sheth",
title = "Semantic and Schematic Similarities Between Database
Objects: a Context-Based Approach",
journal = j-VLDB-J,
volume = "5",
number = "4",
pages = "276--304",
month = dec,
year = "1996",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:39 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb5.html;
http://link.springer.de/link/service/journals/00778/tocs/t6005004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kashyap:Vipul.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sheth:Amit_P=.html;
http://link.springer.de/link/service/journals/00778/bibs/6005004/60050276.htm;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050276.pdf;
http://link.springer.de/link/service/journals/00778/papers/6005004/60050276.ps.gz;
http://link.springer.de/link/service/journals/00778/tocs/mailto: HREF="mailto:helpdesk@link.springer.de">helpdesk@link.springer.de",
abstract = "In a multidatabase system, schematic conflicts between
two objects are usually of interest only when the
objects have some semantic similarity. We use the
concept of {\em semantic proximity}, which is
essentially an {\em abstraction/mapping\/} between the
domains of the two objects associated with the {\em
context of comparison}. An explicit though partial
context representation is proposed and the specificity
relationship between contexts is defined. The contexts
are organized as a meet semi-lattice and associated
operations like the greatest lower bound are defined.
The context of comparison and the type of abstractions
used to relate the two objects form the basis of a
semantic taxonomy. At the {\em semantic level}, the
intensional description of database objects provided by
the context is expressed using description logics. The
terms used to construct the contexts are obtained from
{\em domain-specific ontologies}. {\em Schema
correspondences\/} are used to store mappings from the
semantic level to the data level and are associated
with the respective contexts. Inferences about database
content at the federation level are modeled as changes
in the context and the associated schema
correspondences. We try to reconcile the dual
(schematic and semantic) perspectives by enumerating
{\em possible semantic similarities\/} between objects
having schema and data conflicts, and modeling schema
correspondences as the projection of semantic proximity
{\em with respect to (wrt)\/} context.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Evangelidis:1997:HTM,
author = "Georgios Evangelidis and David B. Lomet and Betty
Salzberg",
title = "The {hB} $^{\Pi}$-tree: a multi-attribute index
supporting concurrency, recovery and node
consolidation",
journal = j-VLDB-J,
volume = "6",
number = "1",
pages = "1--25",
month = feb,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:40 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/e/Evangelidis:Georgios.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lomet:David_B=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Salzberg:Betty.html;
http://link.springer.de/link/service/journals/00778/bibs/7006001/70060001.htm;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060001.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060001.ps.gz",
abstract = "We propose a new multi-attribute index. Our approach
combines the hB-tree, a multi-attribute index, and the
$\Pi$-tree, an abstract index which offers efficient
concurrency and recovery methods. We call the resulting
method the hB $^\Pi$-tree. We describe several versions
of the hB $^\Pi$-tree, each using a different
node-splitting and index-term-posting algorithm. We
also describe a new node deletion algorithm. We have
implemented all the versions of the hB $^\Pi$-tree. Our
performance results show that even the version that
offers no performance guarantees, actually performs
very well in terms of storage utilization, index size
(fan-out), exact-match and range searching, under
various data types and distributions. We have also
shown that our index is fairly insensitive to increases
in dimension. Thus, it is suitable for indexing
high-dimensional applications. This property and the
fact that all our versions of the hB $^\Pi$-tree can
use the $\Pi$-tree concurrency and recovery algorithms
make the hB $^\Pi$-tree a promising candidate for
inclusion in a general-purpose DBMS.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency; multi-attribute index; node
consolidation; recovery",
remark = "Check month: January or February??",
}
@Article{Antoshenkov:1997:DBO,
author = "Gennady Antoshenkov",
title = "Dictionary-based order-preserving string compression
(*)",
journal = j-VLDB-J,
volume = "6",
number = "1",
pages = "26--39",
month = feb,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:40 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Antoshenkov:Gennady.html;
http://link.springer.de/link/service/journals/00778/bibs/7006001/70060026.htm;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060026.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060026.ps.gz",
abstract = "As no database exists without indexes, no index
implementation exists without order-preserving key
compression, in particular, without prefix and tail
compression. However, despite the great potentials of
making indexes smaller and faster, application of
general compression methods to ordered data sets has
advanced very little. This paper demonstrates that the
fast dictionary-based methods can be applied to
order-preserving compression almost with the same
freedom as in the general case. The proposed new
technology has the same speed and a compression rate
only marginally lower than the traditional
order-indifferent dictionary encoding. Procedures for
encoding and generating the encode tables are described
covering such order-related features as ordered data
set restrictions, sensitivity and insensitivity to a
character position, and one-symbol encoding of each
frequent trailing character sequence. The experimental
results presented demonstrate five-folded compression
on real-life data sets and twelve-folded compression on
Wisconsin benchmark text fields.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "indexing; order-preserving key compression",
remark = "Check month: January or February??",
}
@Article{Singhal:1997:ALB,
author = "Vigyan Singhal and Alan Jay Smith",
title = "Analysis of Locking Behavior in Three Real Database
Systems",
journal = j-VLDB-J,
volume = "6",
number = "1",
pages = "40--52",
month = feb,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:40 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Singhal:Vigyan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Smith:Alan_Jay.html;
http://link.springer.de/link/service/journals/00778/bibs/7006001/70060040.htm;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060040.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060040.ps.gz",
abstract = "Concurrency control is essential to the correct
functioning of a database due to the need for correct,
reproducible results. For this reason, and because
concurrency control is a well-formulated problem, there
has developed an enormous body of literature studying
the performance of concurrency control algorithms. Most
of this literature uses either analytic modeling or
random number-driven simulation, and explicitly or
implicitly makes certain assumptions about the behavior
of transactions and the patterns by which they set and
unset locks. Because of the difficulty of collecting
suitable measurements, there have been only a few
studies which use trace-driven simulation, and still
less study directed toward the characterization of
concurrency control behavior of real workloads. In this
paper, we present a study of three database workloads,
all taken from IBM DB2 relational database systems
running commercial applications in a production
environment. This study considers topics such as
frequency of locking and unlocking, deadlock and
blocking, duration of locks, types of locks,
correlations between applications of lock types,
two-phase versus non-two-phase locking, when locks are
held and released, etc. In each case, we evaluate the
behavior of the workload relative to the assumptions
commonly made in the research literature and discuss
the extent to which those assumptions may or may not
lead to erroneous conclusions.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; trace-driven simulation; workload
characterization",
remark = "Check month: January or February??",
}
@Article{Mehta:1997:DPS,
author = "Manish Mehta and David J. DeWitt",
title = "Data placement in shared-nothing parallel database
systems (*)",
journal = j-VLDB-J,
volume = "6",
number = "1",
pages = "53--72",
month = feb,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:40 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/DeWitt:David_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mehta:Manish.html;
http://link.springer.de/link/service/journals/00778/bibs/7006001/70060053.htm;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060053.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006001/70060053.ps.gz;
http://link.springer.de/link/service/journals/00778/tocs/mailto: HREF="mailto:helpdesk@link.springer.de">helpdesk@link.springer.de",
abstract = "Data placement in shared-nothing database systems has
been studied extensively in the past and various
placement algorithms have been proposed. However, there
is no consensus on the most efficient data placement
algorithm and placement is still performed manually by
a database administrator with periodic reorganization
to correct mistakes. This paper presents the first
comprehensive simulation study of data placement issues
in a shared-nothing system. The results show that
current hardware technology trends have significantly
changed the performance tradeoffs considered in past
studies. A simplistic data placement strategy based on
the new results is developed and shown to perform well
for a variety of workloads.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "declustering; disk allocation; resource allocation;
resource scheduling",
remark = "Check month: January or February??",
}
@Article{Papazoglou:1997:DMO,
author = "Mike P. Papazoglou and Bernd J. Kr{\"a}mer",
title = "A Database Model for Object Dynamics",
journal = j-VLDB-J,
volume = "6",
number = "2",
pages = "73--96",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:41 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition. See erratum
\cite{Papazoglou:1997:EDM}.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kr=auml=mer:Bernd_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Papazoglou:Mike_P=.html;
http://link.springer.de/link/service/journals/00778/bibs/7006002/70060073.htm;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060073.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060073.ps.gz",
abstract = "To effectively model complex applications in which
constantly changing situations can be represented, a
database system must be able to support the runtime
specification of structural and behavioral nuances for
objects on an individual or group basis. This paper
introduces the role mechanism as an extension of
object-oriented databases to support unanticipated
behavioral oscillations for objects that may attain
many types and share a single object identity. A role
refers to the ability to represent object dynamics by
seamlessly integrating idiosyncratic behavior, possibly
in response to external events, with pre-existing
object behavior specified at instance creation time. In
this manner, the same object can simultaneously be an
instance of different classes which symbolize the
different roles that this object assumes. The role
concept and its underlying linguistic scheme simplify
the design requirements of complex applications that
need to create and manipulate dynamic objects.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "dynamic class hierarchy; dynamic object
re-classification; object migration; object role model;
object-oriented database systems",
remark = "Check month: May or August??",
}
@Article{Catarci:1997:GIH,
author = "Tiziana Catarci and Giuseppe Santucci and John
Cardiff",
title = "Graphical interaction with heterogeneous databases
(*)",
journal = j-VLDB-J,
volume = "6",
number = "2",
pages = "97--120",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:41 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Cardiff:John.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Catarci:Tiziana.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Santucci:Giuseppe.html;
http://link.springer.de/link/service/journals/00778/bibs/7006002/70060097.htm;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060097.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060097.ps.gz",
abstract = "During the past few years our research efforts have
been inspired by two different needs. On one hand, the
number of non-expert users accessing databases is
growing apace. On the other, information systems will
no longer be characterized by a single centralized
architecture, but rather by several heterogeneous
component systems. In order to address such needs we
have designed a new query system with both
user-oriented and multidatabase features. The system's
main components are an adaptive visual interface,
providing the user with different and interchangeable
interaction modalities, and a ``translation layer'',
which creates and offers to the user the illusion of a
single homogeneous schema out of several heterogeneous
components. Both components are founded on a common
ground, i.e. a formally defined and semantically rich
data model, the Graph Model, and a minimal set of
Graphical Primitives, in terms of which general query
operations may be visually expressed. The Graph Model
has a visual syntax, so that graphical operations can
be applied on its components without unnecessary
mappings, and an object-based semantics. The aim of
this paper is twofold. We first present an overall view
of the system architecture and then give a
comprehensive description of the lower part of the
system itself. In particular, we show how schemata
expressed in different data models can be translated in
terms of Graph Model, possibly by exploiting reverse
engineering techniques. Moreover, we show how mappings
can be established between well-known query languages
and the Graphical Primitives. Finally, we describe in
detail how queries expressed by using the Graphical
Primitives can be translated in terms of relational
expressions so to be processed by actual DBMSs.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
remark = "Check month: May or August??",
}
@Article{Chen:1997:AHF,
author = "Ming-Syan Chen and Hui-I Hsiao and Philip S. Yu",
title = "On Applying Hash Filters to Improving the Execution of
Multi-Join Queries",
journal = j-VLDB-J,
volume = "6",
number = "2",
pages = "121--131",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:41 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chen:Ming=Syan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Hsiao:Hui=I.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Yu:Philip_S=.html;
http://link.springer.de/link/service/journals/00778/bibs/7006002/70060121.htm;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060121.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060121.ps.gz",
abstract = "In this paper, we explore an approach of interleaving
a bushy execution tree with hash filters to improve the
execution of multi-join queries. Similar to semi-joins
in distributed query processing, hash filters can be
applied to eliminate non-matching tuples from joining
relations before the execution of a join, thus reducing
the join cost. Note that hash filters built in
different execution stages of a bushy tree can have
different costs and effects. The effect of hash filters
is evaluated first. Then, an efficient scheme to
determine an effective sequence of hash filters for a
bushy execution tree is developed, where hash filters
are built and applied based on the join sequence
specified in the bushy tree so that not only is the
reduction effect optimized but also the cost associated
is minimized. Various schemes using hash filters are
implemented and evaluated via simulation. It is
experimentally shown that the application of hash
filters is in general a very powerful means to improve
the execution of multi-join queries, and the
improvement becomes more prominent as the number of
relations in a query increases.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "bushy trees; hash filters; parallel query processing;
sort-merge joins",
remark = "Check month: May or August??",
}
@Article{Ioannidis:1997:PQO,
author = "Yannis E. Ioannidis and Raymond T. Ng and Kyuseok Shim
and Timos K. Sellis",
title = "Parametric Query Optimization",
journal = j-VLDB-J,
volume = "6",
number = "2",
pages = "132--151",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:41 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/i/Ioannidis:Yannis_E=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Ng:Raymond_T=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sellis:Timos_K=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Shim:Kyuseok.html;
http://link.springer.de/link/service/journals/00778/bibs/7006002/70060132.htm;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060132.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060132.ps.gz",
abstract = "In most database systems, the values of many important
run-time parameters of the system, the data, or the
query are unknown at query optimization time.
Parametric query optimization attempts to identify at
compile time several execution plans, each one of which
is optimal for a subset of all possible values of the
run-time parameters. The goal is that at run time, when
the actual parameter values are known, the appropriate
plan should be identifiable with essentially no
overhead. We present a general formulation of this
problem and study it primarily for the buffer size
parameter. We adopt randomized algorithms as the main
approach to this style of optimization and enhance them
with a {\em sideways information passing\/} feature
that increases their effectiveness in the new task.
Experimental results of these enhanced algorithms show
that they optimize queries for large numbers of buffer
sizes in the same time needed by their conventional
versions for a single buffer size, without much
sacrifice in the output quality and with essentially
zero run-time overhead.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
remark = "Check month: May or August??",
}
@Article{Mehrotra:1997:CCH,
author = "Sharad Mehrotra and Henry F. Korth and Avi
Silberschatz",
title = "Concurrency Control in Hierarchical Multidatabase
Systems",
journal = j-VLDB-J,
volume = "6",
number = "2",
pages = "152--172",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:41 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Korth:Henry_F=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mehrotra:Sharad.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Silberschatz:Abraham.html;
http://link.springer.de/link/service/journals/00778/bibs/7006002/70060152.htm;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060152.pdf;
http://link.springer.de/link/service/journals/00778/papers/7006002/70060152.ps.gz;
http://link.springer.de/link/service/journals/00778/tocs/mailto: HREF="mailto:helpdesk@link.springer.de">helpdesk@link.springer.de",
abstract = "Over the past decade, significant research has been
done towards developing transaction management
algorithms for multidatabase systems. Most of this work
assumes a monolithic architecture of the multidatabase
system with a single software module that follows a
single transaction management algorithm to ensure the
consistency of data stored in the local databases. This
monolithic architecture is not appropriate in a
multidatabase environment where the system spans
multiple different organizations that are distributed
over various geographically distant locations. In this
paper, we propose an alternative multidatabase
transaction management architecture, where the system
is hierarchical in nature. Hierarchical architecture
has consequences on the design of transaction
management algorithms. An implication of the
architecture is that the transaction management
algorithms followed by a multidatabase system must be
{\em composable\/} --- that is, it must be possible to
incorporate individual multidatabase systems as
elements in a larger multidatabase system. We present a
hierarchical architecture for a multidatabase
environment and develop techniques for concurrency
control in such systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; database management; distributed
databases; multidatabase management",
remark = "Check month: May or August??",
xxauthor = "Sharad Mehrotra and Henry F. Korth and Abraham
Silberschatz",
}
@Article{Cobb:1997:IOT,
author = "Edward E. Cobb",
title = "The impact of object technology on commercial
transaction processing",
journal = j-VLDB-J,
volume = "6",
number = "3",
pages = "173--190",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:42 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Businesses today are searching for information
solutions that enable them to compete in the global
marketplace. To minimize risk, these solutions must
build on existing investments, permit the best
technology to be applied to the problem, and be
manageable. Object technology, with its promise of
improved productivity and quality in application
development, delivers these characteristics but, to
date, its deployment in commercial business
applications has been limited. One possible reason is
the absence of the transaction paradigm, widely used in
commercial environments and essential for reliable
business applications. For object technology to be a
serious contender in the construction of these
solutions requires: --- technology for transactional
objects. In December 1994, the Object Management Group
adopted a specification for an object {\em transaction
service\/} (OTS). The OTS specifies mechanisms for
defining and manipulating transactions. Though derived
from the X/Open distributed transaction processing
model, OTS contains additional enhancements
specifically designed for the object environment.
Similar technology from Microsoft appeared at the end
of 1995. --- methodologies for building new business
systems from existing parts. Business process
re-engineering is forcing businesses to improve their
operations which bring products to market. {\em
Workflow computing}, when used in conjunction with {\em
``object wrappers''\/} provides tools to both define
and track execution of business processes which
leverage existing applications and infrastructure. --
an execution environment which satisfies the
requirements of the operational needs of the business.
Transaction processing (TP) monitor technology, though
widely accepted for mainframe transaction processing,
has yet to enjoy similar success in the client/server
marketplace. Instead the database vendors, with their
extensive tool suites, dominate. As object brokers
mature they will require many of the functions of
today's TP monitors. Marrying these two technologies
can produce a robust execution environment which offers
a superior alternative for building and deploying
client/server applications.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "objects; transaction processing; workflow",
}
@Article{Cobb:1997:ITC,
author = "Edward E. Cobb",
title = "The Impact of Technology on Commercial Transaction
Processing",
journal = j-VLDB-J,
volume = "6",
number = "3",
pages = "173--190",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 10:11:57 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t0006003.htm;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Cobb:Edward_E=.html;
http://link.springer.de/link/service/journals/00778/bibs/7006003/70060173.htm",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
remark = "Check month: May or August??",
xxtitle = "The impact of object technology on commercial
transaction processing",
}
@Article{Steinbrunn:1997:HRO,
author = "Michael Steinbrunn and Guido Moerkotte and Alfons
Kemper",
title = "Heuristic and Randomized Optimization for the Join
Ordering Problem",
journal = j-VLDB-J,
volume = "6",
number = "3",
pages = "191--208",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:42 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t0006003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kemper:Alfons.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Moerkotte:Guido.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Steinbrunn:Michael.html;
http://link.springer.de/link/service/journals/00778/bibs/7006003/70060191.htm",
abstract = "Recent developments in database technology, such as
deductive database systems, have given rise to the
demand for new, cost-effective optimization techniques
for join expressions. In this paper many different
algorithms that compute approximate solutions for
optimizing join orders are studied since traditional
dynamic programming techniques are not appropriate for
complex problems. Two possible solution spaces, the
space of left-deep and bushy processing trees, are
evaluated from a statistical point of view. The result
is that the common limitation to left-deep processing
trees is only advisable for certain join graph types.
Basically, optimizers from three classes are analysed:
heuristic, randomized and genetic algorithms. Each one
is extensively scrutinized with respect to its working
principle and its fitness for the desired application.
It turns out that randomized and genetic algorithms are
well suited for optimizing join expressions. They
generate solutions of high quality within a reasonable
running time. The benefits of heuristic optimizers,
namely the short running time, are often outweighed by
merely moderate optimization performance.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "genetic algorithms; heuristic algorithms; join
ordering; query optimization; randomized algorithms",
remark = "Check month: May or August??",
}
@Article{Panagos:1997:SRC,
author = "Euthimios Panagos and Alexandros Biliris",
title = "Synchronization and Recovery in a Client-Server
Storage System",
journal = j-VLDB-J,
volume = "6",
number = "3",
pages = "209--223",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:42 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t0006003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Biliris:Alexandros.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Panagos:Euthimios.html;
http://link.springer.de/link/service/journals/00778/bibs/7006003/70060209.htm",
abstract = "Client-server object-oriented database management
systems differ significantly from traditional
centralized systems in terms of their architecture and
the applications they target. In this paper, we present
the client-server architecture of the EOS storage
manager and we describe the concurrency control and
recovery mechanisms it employs. EOS offers a
semi-optimistic locking scheme based on the
multi-granularity two-version two-phase locking
protocol. Under this scheme, multiple concurrent
readers are allowed to access a data item while it is
being updated by a single writer. Recovery is based on
write-ahead redo-only logging. Log records are
generated at the clients and they are shipped to the
server during normal execution and at transaction
commit. Transaction rollback is fast because there are
no updates that have to be undone, and recovery from
system crashes requires only one scan of the log for
installing the changes made by transactions that
committed before the crash. We also present a
preliminary performance evaluation of the
implementation of the above mechanisms.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "checkpoint; client-server architecture; concurrency
control; locking; logging; object management; recovery;
transaction management",
remark = "Check month: May or August??",
}
@Article{Lomet:1997:CRI,
author = "David B. Lomet and Betty Salzberg",
title = "Concurrency and Recovery for Index Trees",
journal = j-VLDB-J,
volume = "6",
number = "3",
pages = "224--240",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:42 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t0006003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lomet:David_B=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Salzberg:Betty.html;
http://link.springer.de/link/service/journals/00778/bibs/7006003/70060224.htm",
abstract = "Although many suggestions have been made for
concurrency in B$^+$-trees, few of these have
considered recovery as well. We describe an approach
which provides high concurrency while preserving
well-formed trees across system crashes. Our approach
works for a class of index trees that is a
generalization of the B$^{\rm link}$-tree. This class
includes some multi-attribute indexes and temporal
indexes. Structural changes in an index tree are
decomposed into a sequence of atomic actions, each one
leaving the tree well-formed and each working on a
separate level of the tree. All atomic actions on
levels of the tree above the leaf level are independent
of database transactions, and so are of short duration.
Incomplete structural changes are detected in normal
operations and trigger completion.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access methods; B-trees; concurrency; indexing;
recovery",
remark = "Check month: May or August??",
}
@Article{Haas:1997:STA,
author = "Laura M. Haas and Michael J. Carey and Miron Livny and
Amit Shukla",
title = "Seeking the truth about {\em ad hoc\/} join costs",
journal = j-VLDB-J,
volume = "6",
number = "3",
pages = "241--256",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:42 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t0006003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Carey:Michael_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Haas:Laura_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Livny:Miron.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Shukla:Amit.html;
http://link.springer.de/link/service/journals/00778/bibs/7006003/70060241.htm",
abstract = "In this paper, we re-examine the results of prior work
on methods for computing {\em ad hoc\/} joins. We
develop a detailed cost model for predicting join
algorithm performance, and we use the model to develop
cost formulas for the major {\em ad hoc\/} join methods
found in the relational database literature. We show
that various pieces of ``common wisdom'' about join
algorithm performance fail to hold up when analyzed
carefully, and we use our detailed cost model to derive
optimal buffer allocation schemes for each of the join
methods examined here. We show that optimizing their
buffer allocations can lead to large performance
improvements, e.g., as much as a 400\% improvement in
some cases. We also validate our cost model's
predictions by measuring an actual implementation of
each join algorithm considered. The results of this
work should be directly useful to implementors of
relational query optimizers and query processing
systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "buffer allocation; cost models; join methods;
optimization; performance",
remark = "Check month: May or August??",
}
@Article{Papazoglou:1997:EDM,
author = "Mike P. Papazoglou and Bernd J. Kr{\"a}mer",
title = "Erratum --- {A} database model for object dynamics",
journal = j-VLDB-J,
volume = "6",
number = "3",
pages = "257--260",
month = aug,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:42 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t0006003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition. See \cite{Papazoglou:1997:DMO}.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kr=auml=mer:Bernd_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Papazoglou:Mike_P=.html;
http://link.springer.de/link/service/journals/00778/bibs/7006003/70060257.htm",
abstract = "Due to a technical error, some figures of the above
paper were not reproduced satisfactorily. They are
printed again below.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
remark = "Check month: May or August??",
}
@Article{Fahl:1997:QPO,
author = "Gustav Fahl and Tore Risch",
title = "Query Processing Over Object Views of Relational
Data",
journal = j-VLDB-J,
volume = "6",
number = "4",
pages = "261--281",
month = nov,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:44 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Fahl:Gustav.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Risch:Tore.html;
http://link.springer.de/link/service/journals/00778/bibs/7006004/70060261.htm;
http://link.springer.de/link/service/journals/00778/papers/7006004/70060261.pdf",
abstract = "This paper presents an approach to {\em object view\/}
management for relational databases. Such a view
mechanism makes it possible for users to transparently
work with data in a relational database as if it was
stored in an object-oriented (OO) database. A query
against the object view is translated to one or several
queries against the relational database. The results of
these queries are then processed to form an answer to
the initial query. The approach is not restricted to a
`pure' object view mechanism for the relational data,
since the object view can also store its own data and
methods. Therefore it must be possible to process
queries that combine local data residing in the object
view with data retrieved from the relational database.
We discuss the key issues when object views of
relational databases are developed, namely: how to map
relational structures to sub-type/supertype hierarchies
in the view, how to represent relational database
access in OO query plans, how to provide the concept of
object identity in the view, how to handle the fact
that the extension of types in the view depends on the
state of the relational database, and how to process
and optimize queries against the object view. The
results are based on experiences from a running
prototype implementation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "object views; object-oriented federated databases;
query optimization; query processing; relational
databases",
}
@Article{Diaz:1997:EEA,
author = "Oscar D{\'\i}az and Arturo Jaime",
title = "{EXACT}: An Extensible Approach to Active
Object-Oriented Databases",
journal = j-VLDB-J,
volume = "6",
number = "4",
pages = "282--295",
month = nov,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:44 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/D=iacute=az:Oscar.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/j/Jaime:Arturo.html;
http://link.springer.de/link/service/journals/00778/bibs/7006004/70060282.htm;
http://link.springer.de/link/service/journals/00778/papers/7006004/70060282.pdf",
abstract = "Active database management systems (DBMSs) are a
fast-growing area of research, mainly due to the large
number of applications which can benefit from this
active dimension. These applications are far from being
homogeneous, requiring different kinds of
functionalities. However, most of the active DBMSs
described in the literature only provide a {\em fixed,
hard-wired\/} execution model to support the active
dimension. In object-oriented DBMSs,
event-condition-action rules have been proposed for
providing active behaviour. This paper presents EXACT,
a rule manager for object-oriented DBMSs which provides
a variety of options from which the designer can choose
the one that best fits the semantics of the concept to
be supported by rules. Due to the difficulty of
foreseeing future requirements, special attention has
been paid to making rule management easily extensible,
so that the user can tailor it to suit specific
applications. This has been borne out by an
implementation in ADAM, an object-oriented DBMS. An
example is shown of how the default mechanism can be
easily extended to support new requirements.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "active DBMS; extensibility; metaclasses;
object-oriented DBMS",
}
@Article{Bohm:1997:SDS,
author = "Klemens B{\"o}hm and Karl Aberer and Erich J. Neuhold
and Xiaoya Yang",
title = "Structured Document Storage and Refined Declarative
and Navigational Access Mechanisms in {HyperStorM}",
journal = j-VLDB-J,
volume = "6",
number = "4",
pages = "296--311",
month = nov,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:44 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Aberer:Karl.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/B=ouml=hm:Klemens.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Neuhold:Erich_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Yang:Xiaoya.html;
http://link.springer.de/link/service/journals/00778/bibs/7006004/70060296.htm;
http://link.springer.de/link/service/journals/00778/papers/7006004/70060296.pdf",
abstract = "The combination of SGML and database technology allows
to refine both declarative and navigational access
mechanisms for structured document collection: with
regard to declarative access, the user can formulate
complex information needs without knowing a query
language, the respective document type definition (DTD)
or the underlying modelling. Navigational access is
eased by hyperlink-rendition mechanisms going beyond
plain link-integrity checking. With our approach, the
database-internal representation of documents is
configurable. It allows for an efficient implementation
of operations, because DTD knowledge is not needed for
document structure recognition. We show how the number
of method invocations and the cost of parsing can be
significantly reduced.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "document query languages; navigation; OODBMSs; SGML",
}
@Article{Muck:1997:CTH,
author = "Thomas A. M{\"u}ck and Martin L. Polaschek",
title = "A Configurable Type Hierarchy Index for {OODB}",
journal = j-VLDB-J,
volume = "6",
number = "4",
pages = "312--332",
month = nov,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 08:46:02 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/M=uuml=ck:Thomas_A=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Polaschek:Martin_L=.html",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Mueck:1997:CTH,
author = "Thomas A. Mueck and Martin L. Polaschek",
title = "A configurable type hierarchy index for {OODB}",
journal = j-VLDB-J,
volume = "6",
number = "4",
pages = "312--332",
month = nov,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:44 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t7006004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/7006004/70060312.htm;
http://link.springer.de/link/service/journals/00778/papers/7006004/70060312.pdf",
abstract = "With respect to the specific requirements of advanced
OODB applications, index data structures for type
hierarchies in OODBMS have to provide efficient support
for multiattribute queries and have to allow index
optimization for a particular query profile. We
describe the {\em multikey type index\/} and an
efficient implementation of this indexing scheme. It
meets both requirements: in addition to its
multiattribute query capabilities it is designed as a
mediator between two standard design alternatives,
key-grouping and type-grouping. A prerequisite for the
multikey type index is a linearization algorithm which
maps type hierarchies to linearly ordered attribute
domains in such a way that each subhierarchy is
represented by an interval of this domain. The
algorithm extends previous results with respect to
multiple inheritance. The subsequent evaluation of our
proposal focuses on storage space overhead as well as
on the number of disk I/O operations needed for query
execution. The analytical results for the multikey type
index are compared to previously published figures for
well-known single-key search structures. The comparison
clearly shows the superiority of the multikey type
index for a large class of query profiles.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access methods; indexing; multiple inheritance; OODB;
type hierarchies",
}
@Article{Berchtold:1997:UEF,
author = "Stefan Berchtold and Daniel A. Keim and Hans-Peter
Kriegel",
title = "Using Extended Feature Objects for Partial Similarity
Retrieval",
journal = j-VLDB-J,
volume = "6",
number = "4",
pages = "333--348",
month = nov,
year = "1997",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:44 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb6.html;
http://link.springer.de/link/service/journals/00778/tocs/t7006004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Berchtold:Stefan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Keim:Daniel_A=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kriegel:Hans=Peter.html;
http://link.springer.de/link/service/journals/00778/bibs/7006004/70060333.htm;
http://link.springer.de/link/service/journals/00778/papers/7006004/70060333.pdf",
abstract = "In this paper, we introduce the concept of extended
feature objects for similarity retrieval. Conventional
approaches for similarity search in databases map each
object in the database to a point in some
high-dimensional feature space and define similarity as
some distance measure in this space. For many
similarity search problems, this feature-based approach
is not sufficient. When retrieving partially similar
polygons, for example, the search cannot be restricted
to edge sequences, since similar polygon sections may
start and end anywhere on the edges of the polygons. In
general, inherently continuous problems such as the
partial similarity search cannot be solved by using
point objects in feature space. In our solution, we
therefore introduce extended feature objects consisting
of an infinite set of feature points. For an efficient
storage and retrieval of the extended feature objects,
we determine the minimal bounding boxes of the feature
objects in multidimensional space and store these boxes
using a spatial access structure. In our concrete
polygon problem, sets of polygon sections are mapped to
2D feature objects in high-dimensional space which are
then approximated by minimal bounding boxes and stored
in an R$^*$-tree. The selectivity of the index is
improved by using an adaptive decomposition of very
large feature objects and a dynamic joining of small
feature objects. For the polygon problem, translation,
rotation, and scaling invariance is achieved by using
the Fourier-transformed curvature of the normalized
polygon sections. In contrast to vertex-based
algorithms, our algorithm guarantees that no false
dismissals may occur and additionally provides fast
search times for realistic database sizes. We evaluate
our method using real polygon data of a supplier for
the car manufacturing industry.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "CAD databases; Fourier transformation; indexing and
query processing of spatial objects; partial similarity
retrieval",
}
@Article{Han:1998:ORQ,
author = "Jia Liang Han",
title = "Optimizing Relational Queries in Connection
Hypergraphs: Nested Queries, Views, and Binding
Propagations",
journal = j-VLDB-J,
volume = "7",
number = "1",
pages = "1--11",
month = feb,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Han:Jia_Liang.html;
http://link.springer.de/link/service/journals/00778/bibs/8007001/80070001.htm;
http://link.springer.de/link/service/journals/00778/papers/8007001/80070001.pdf",
abstract = "We optimize relational queries using connection
hypergraphs (CHGs). All operations including
value-passing between SQL blocks can be set-oriented.
By introducing partial evaluations, reordering
operations can be achieved for nested queries. For a
query using views, we merge CHGs for the views and the
query into one CHG and then apply query optimization.
Furthermore, we may simulate magic sets methods
elegantly in a CHG. Sideways information-passing
strategies (SIPS) in a CHG amount to partial
evaluations of SIPS paths. We introduce the maximum
SIPS strategy, which performs SIPS for all bindings and
all SIPS paths for a query. The new method has several
advantages. First, the maximum SIPS strategy can be
more efficient than the previous SIPS based on simple
heuristics. Second, it is conceptually simple and easy
to implement. Third, the processing strategies may be
incorporated with the search space for query execution
plans, which is a proven optimization strategy
introduced by System R. Fourth, it provides a general
framework of query optimization and may potentially be
used to optimize next-generation database systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "connection hypergraphs; partial evaluations;
relational query optimization; search space; SIPS",
}
@Article{Hanson:1998:FRC,
author = "Eric N. Hanson and I.-Cheng Chen and Roxana Dastur and
Kurt Engel and Vijay Ramaswamy and Wendy Tan and Chun
Xu",
title = "A Flexible and Recoverable Client\slash Server
Database Event Notification System",
journal = j-VLDB-J,
volume = "7",
number = "1",
pages = "12--24",
month = feb,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chen:I==Cheng.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Dastur:Roxana.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/e/Engel:Kurt.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Hanson:Eric_N=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ramaswamy:Vijay.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tan:Wendy.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/x/Xu:Chun.html;
http://link.springer.de/link/service/journals/00778/bibs/8007001/80070012.htm;
http://link.springer.de/link/service/journals/00778/papers/8007001/80070012.pdf",
abstract = "A software architecture is presented that allows
client application programs to interact with a DBMS
server in a flexible and powerful way, using either
direct, volatile messages, or messages sent via
recoverable queues. Normal requests from clients to the
server and replies from the server to clients can be
transmitted using direct or recoverable messages. In
addition, an application event notification mechanism
is provided, whereby client applications running
anywhere on the network can register for events, and
when those events are raised, the clients are notified.
A novel parameter passing mechanism allows a set of
tuples to be included in an event notification. The
event mechanism is particularly useful in an active
DBMS, where events can be raised by triggers to signal
running application programs.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Mehta:1998:OPM,
author = "Ashish Mehta and James Geller and Yehoshua Perl and
Erich J. Neuhold",
title = "The {OODB} Path-Method Generator ({PMG}) Using Access
Weights and Precomputed Access Relevance",
journal = j-VLDB-J,
volume = "7",
number = "1",
pages = "25--47",
month = feb,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Geller:James.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mehta:Ashish.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Neuhold:Erich_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Perl:Yehoshua.html;
http://link.springer.de/link/service/journals/00778/bibs/8007001/80070025.htm;
http://link.springer.de/link/service/journals/00778/papers/8007001/80070025.pdf",
abstract = "A {\em path-method\/} is used as a mechanism in
object-oriented databases (OODBs) to retrieve or to
update information relevant to one class that is not
stored with that class but with some other class. A
path-method is a method which traverses from one class
through a chain of connections between classes and
accesses information at another class. However, it is a
difficult task for a casual user or even an application
programmer to write path-methods to facilitate queries.
This is because it might require comprehensive
knowledge of many classes of the conceptual schema that
are not directly involved in the query, and therefore
may not even be included in a user's (incomplete) view
about the contents of the database. We have developed a
system, called {\em path-method generator\/} (PMG),
which generates path-methods automatically according to
a user's database-manipulating requests. The PMG offers
the user one of the possible path-methods and the user
verifies from his knowledge of the intended purpose of
the request whether that path-method is the desired
one. If the path method is rejected, then the user can
utilize his now increased knowledge about the database
to request (with additional parameters given) another
offer from the PMG. The PMG is based on {\em access
weights\/} attached to the connections between classes
and precomputed {\em access relevance\/} between every
pair of classes of the OODB. Specific rules for access
weight assignment and algorithms for computing access
relevance appeared in our previous papers [MGPF92,
MGPF93, MGPF96]. In this paper, we present a variety of
traversal algorithms based on access weights and
precomputed access relevance. Experiments identify some
of these algorithms as very successful in generating
most desired path-methods. The PMG system utilizes
these successful algorithms and is thus an efficient
tool for aiding the user with the difficult task of
querying and updating a large OODB.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access relevance; access weight; object-oriented
databases; OODB queries; path-method; traversal
algorithms",
}
@Article{Scheuermann:1998:DPL,
author = "Peter Scheuermann and Gerhard Weikum and Peter
Zabback",
title = "Data Partitioning and Load Balancing in Parallel Disk
Systems",
journal = j-VLDB-J,
volume = "7",
number = "1",
pages = "48--66",
month = feb,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Scheuermann:Peter.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Weikum:Gerhard.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Zabback:Peter.html;
http://link.springer.de/link/service/journals/00778/bibs/8007001/80070048.htm;
http://link.springer.de/link/service/journals/00778/papers/8007001/80070048.pdf",
abstract = "Parallel disk systems provide opportunities for
exploiting I/O parallelism in two possible ways, namely
via inter-request and intra-request parallelism. In
this paper, we discuss the main issues in performance
tuning of such systems, namely striping and load
balancing, and show their relationship to response time
and throughput. We outline the main components of an
intelligent, self-reliant file system that aims to
optimize striping by taking into account the
requirements of the applications, and performs load
balancing by judicious file allocation and dynamic
redistributions of the data when access patterns
change. Our system uses simple but effective heuristics
that incur only little overhead. We present performance
experiments based on synthetic workloads and real-life
traces.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data allocation; disk cooling; file striping; load
balancing; parallel disk systems; performance tuning",
}
@Article{Ishakbeyoglu:1998:MII,
author = "Naci S. Ishakbeyo{\u{g}}lu and Z. Meral
{\"O}zsoyo{\u{g}}lu",
title = "Maintenance of Implication Integrity Constraints Under
Updates to Constraints",
journal = j-VLDB-J,
volume = "7",
number = "2",
pages = "67--78",
month = may,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/=/=Ouml=zsoyoglu:Z=_Meral.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/i/Ishakbeyoglu:Naci.html;
http://link.springer.de/link/service/journals/00778/bibs/8007002/80070067.htm;
http://link.springer.de/link/service/journals/00778/papers/8007002/80070067.pdf",
abstract = "Semantic integrity constraints are used for enforcing
the integrity of the database as well as for improving
the efficiency of the database utilization. Although
semantic integrity constraints are usually much more
static as compared to the data itself, changes in the
data semantics may necessitate corresponding changes in
the constraint base. In this paper we address the
problems related with maintaining a consistent and
non-redundant set of constraints satisfied by the
database in the case of updates to the constraint base.
We consider implication constraints as semantic
integrity constraints. The constraints are represented
as conjunctions of inequalities. We present a
methodology to determine whether a constraint is
redundant or contradictory with respect to a set of
constraints. The methodology is based on the
partitioning of the constraint base which improves the
efficiency of algorithms that check whether a
constraint is redundant or contradictory with respect
to a constraint base.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "implication integrity constraints; integrity
constraints; partitioning; redundancy; satisfiability",
}
@Article{Dessloch:1998:ADP,
author = "Stefan De{\ss}loch and Theo H{\"a}rder and Nelson
Mendon{\c{c}}a Mattos and Bernhard Mitschang and
Joachim Thomas",
title = "Advanced Data Processing in {KRISYS}: Modeling
Concepts, Implementation Techniques, and Client\slash
Server Issues",
journal = j-VLDB-J,
volume = "7",
number = "2",
pages = "79--95",
month = may,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/De=szlig=loch:Stefan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/H=auml=rder:Theo.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mattos:Nelson_Mendon=ccedil=a.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mitschang:Bernhard.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Thomas:Joachim.html;
http://link.springer.de/link/service/journals/00778/bibs/8007002/80070079.htm;
http://link.springer.de/link/service/journals/00778/papers/8007002/80070079.pdf",
abstract = "The increasing power of modern computers is steadily
opening up new application domains for advanced data
processing such as engineering and knowledge-based
applications. To meet their requirements, concepts for
advanced data management have been investigated during
the last decade, especially in the field of object
orientation. Over the last couple of years, the
database group at the University of Kaiserslautern has
been developing such an advanced database system, the
KRISYS prototype. In this article, we report on the
results and experiences obtained in the course of this
project. The primary objective for the first version of
KRISYS was to provide semantic features, such as an
expressive data model, a set-oriented query language,
deductive as well as active capabilities. The first
KRISYS prototype became completely operational in 1989.
To evaluate its features and to stabilize its
functionality, we started to develop several
applications with the system. These experiences marked
the starting point for an overall redesign of KRISYS.
Major goals were to tune KRISYS and its
query-processing facilities to a suitable client/server
environment, as well as to provide elaborate mechanisms
for consistency control comprising semantic integrity
constraints, multi-user synchronization, and failure
recovery. The essential aspects of the resulting
client/server architecture are embodied by the
client-side data management needed to effectively
support advanced applications and to gain the required
system performance for interactive work. The project
stages of KRISYS properly reflect the essential
developments that have taken place in the research on
advanced database systems over the last years. Hence,
the subsequent discussions will bring up a number of
important aspects with regard to advanced data
processing that are of significant general importance,
as well as of general applicability to database
systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "client\slash server architectures; Consistency
control; object-oriented modeling concepts; query
processing; run-time optimization",
}
@Article{Abiteboul:1998:LVS,
author = "Serge Abiteboul and Sophie Cluet and Tova Milo",
title = "A Logical View of Structured Files",
journal = j-VLDB-J,
volume = "7",
number = "2",
pages = "96--114",
month = may,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Abiteboul:Serge.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Cluet:Sophie.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Milo:Tova.html;
http://link.springer.de/link/service/journals/00778/bibs/8007002/80070096.htm;
http://link.springer.de/link/service/journals/00778/papers/8007002/80070096.pdf",
abstract = "Structured data stored in files can benefit from
standard database technology. In particular, we show
here how such data can be queried and updated using
declarative database languages. We introduce the notion
of {\em structuring schema}, which consists of a
grammar annotated with database programs. Based on a
structuring schema, a file can be viewed as a database
structure, queried and updated as such. For {\em
queries}, we show that almost standard database
optimization techniques can be used to answer queries
without having to construct the entire database. For
{\em updates}, we study in depth the propagation to the
file of an update specified on the database view of
this file. The problem is not feasible in general and
we present a number of negative results. The positive
results consist of techniques that allow to propagate
updates efficiently under some reasonable {\em
locality\/} conditions on the structuring schemas.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database; file system; query; query and update
optimization; textual data; update",
}
@Article{Ooi:1998:FIR,
author = "Beng Chin Ooi and Kian-Lee Tan and Tat Seng Chua and
Wynne Hsu",
title = "Fast Image Retrieval Using Color-Spatial Information",
journal = j-VLDB-J,
volume = "7",
number = "2",
pages = "115--128",
month = may,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:45 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chua:Tat=Seng.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Hsu:Wynne.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/o/Ooi:Beng_Chin.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tan:Kian=Lee.html;
http://link.springer.de/link/service/journals/00778/bibs/8007002/80070115.htm;
http://link.springer.de/link/service/journals/00778/papers/8007002/80070115.pdf",
abstract = "In this paper, we present an image retrieval system
that employs both the color and spatial information of
images to facilitate the retrieval process. The basic
unit used in our technique is a {\em single-colored
cluster}, which bounds a homogeneous region of that
color in an image. Two clusters from two images are
similar if they are of the same color and overlap in
the image space. The number of clusters that can be
extracted from an image can be very large, and it
affects the accuracy of retrieval. We study the effect
of the number of clusters on retrieval effectiveness to
determine an appropriate value for ``optimal''
performance. To facilitate efficient retrieval, we also
propose a multi-tier indexing mechanism called the {\em
Sequenced Multi-Attribute Tree\/} (SMAT). We
implemented a two-tier SMAT, where the first layer is
used to prune away clusters that are of different
colors, while the second layer discriminates clusters
of different spatial locality. We conducted an
experimental study on an image database consisting of
12,000 images. Our results show the effectiveness of
the proposed color-spatial approach, and the efficiency
of the proposed indexing mechanism.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "color-spatial information; content-based retrieval;
sequenced multi-attribute tree; single-colored
cluster",
}
@Article{Jarke:1998:GE,
author = "Matthias Jarke",
title = "Guest {Editorial}",
journal = j-VLDB-J,
volume = "7",
number = "3",
pages = "129--129",
month = aug,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:47 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t8007003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/8007003/80070129.htm;
http://link.springer.de/link/service/journals/00778/papers/8007003/80070129.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Seshadri:1998:EAD,
author = "Praveen Seshadri",
title = "Enhanced Abstract Data Types in Object-Relational
Databases",
journal = j-VLDB-J,
volume = "7",
number = "3",
pages = "130--140",
month = aug,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:47 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Special Issue on {VLDB 1997}. Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Seshadri:Praveen.html;
http://link.springer.de/link/service/journals/00778/bibs/8007003/80070130.htm;
http://link.springer.de/link/service/journals/00778/papers/8007003/80070130.pdf",
abstract = "The explosion in complex multimedia content makes it
crucial for database systems to support such data
efficiently. This paper argues that the ``blackbox''
ADTs used in current object-relational systems inhibit
their performance, thereby limiting their use in
emerging applications. Instead, the next generation of
object-relational database systems should be based on
enhanced abstract data type (E-ADT) technology. An
(E-ADT) can expose the {\em semantics\/} of its methods
to the database system, thereby permitting advanced
query optimizations. Fundamental architectural changes
are required to build a database system with E-ADTs;
the added functionality should not compromise the
modularity of data types and the extensibility of the
type system. The implementation issues have been
explored through the development of E-ADTs in {\em
Predator}. Initial performance results demonstrate an
order of magnitude in performance improvements.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database types; extensibility; object-relational
database; query optimization",
}
@Article{Kraiss:1998:IDC,
author = "Achim Kraiss and Gerhard Weikum",
title = "Integrated Document Caching and Prefetching in Storage
Hierarchies Based on {Markov}-Chain Predictions",
journal = j-VLDB-J,
volume = "7",
number = "3",
pages = "141--162",
month = aug,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:47 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kraiss:Achim.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Weikum:Gerhard.html;
http://link.springer.de/link/service/journals/00778/bibs/8007003/80070141.htm;
http://link.springer.de/link/service/journals/00778/papers/8007003/80070141.pdf",
abstract = "Large multimedia document archives may hold a major
fraction of their data in tertiary storage libraries
for cost reasons. This paper develops an integrated
approach to the vertical data migration between the
tertiary, secondary, and primary storage in that it
reconciles speculative prefetching, to mask the high
latency of the tertiary storage, with the replacement
policy of the document caches at the secondary and
primary storage level, and also considers the
interaction of these policies with the tertiary and
secondary storage request scheduling. The integrated
migration policy is based on a continuous-time Markov
chain model for predicting the expected number of
accesses to a document within a specified time horizon.
Prefetching is initiated only if that expectation is
higher than those of the documents that need to be
dropped from secondary storage to free up the necessary
space. In addition, the possible resource contention at
the tertiary and secondary storage is taken into
account by dynamically assessing the response-time
benefit of prefetching a document versus the penalty
that it would incur on the response time of the pending
document requests. The parameters of the
continuous-time Markov chain model, the probabilities
of co-accessing certain documents and the interaction
times between successive accesses, are dynamically
estimated and adjusted to evolving workload patterns by
keeping online statistics. The integrated policy for
vertical data migration has been implemented in a
prototype system. The system makes profitable use of
the Markov chain model also for the scheduling of
volume exchanges in the tertiary storage library.
Detailed simulation experiments with Web-server-like
synthetic workloads indicate significant gains in terms
of client response time. The experiments also show that
the overhead of the statistical bookkeeping and the
computations for the access predictions is
affordable.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "caching; Markov chains; performance; prefetching;
scheduling; stochastic modeling; tertiary storage",
}
@Article{Chakrabarti:1998:SFS,
author = "Soumen Chakrabarti and Byron Dom and Rakesh Agrawal
and Prabhakar Raghavan",
title = "Scalable Feature Selection, Classification and
Signature Generation for Organizing Large Text
Databases into Hierarchical Topic Taxonomies",
journal = j-VLDB-J,
volume = "7",
number = "3",
pages = "163--178",
month = aug,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:47 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Agrawal:Rakesh.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chakrabarti:Soumen.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Dom:Byron.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Raghavan:Prabhakar.html;
http://link.springer.de/link/service/journals/00778/bibs/8007003/80070163.htm;
http://link.springer.de/link/service/journals/00778/papers/8007003/80070163.pdf",
abstract = "We explore how to organize large text databases
hierarchically by topic to aid better searching,
browsing and filtering. Many corpora, such as Internet
directories, digital libraries, and patent databases
are manually organized into topic hierarchies, also
called {\em taxonomies}. Similar to indices for
relational data, taxonomies make search and access more
efficient. However, the exponential growth in the
volume of on-line textual information makes it nearly
impossible to maintain such taxonomic organization for
large, fast-changing corpora by hand. We describe an
automatic system that starts with a small sample of the
corpus in which topics have been assigned by hand, and
then updates the database with new documents as the
corpus grows, assigning topics to these new documents
with high speed and accuracy. To do this, we use
techniques from statistical pattern recognition to
efficiently separate the {\em feature\/} words, or {\em
discriminants}, from the {\em noise\/} words at each
node of the taxonomy. Using these, we build a
multilevel classifier. At each node, this classifier
can ignore the large number of ``noise'' words in a
document. Thus, the classifier has a small model size
and is very fast. Owing to the use of context-sensitive
features, the classifier is very accurate. As a
by-product, we can compute for each document a set of
terms that occur significantly more often in it than in
the classes to which it belongs. We describe the design
and implementation of our system, stressing how to
exploit standard, efficient relational operations like
sorts and joins. We report on experiences with the
Reuters newswire benchmark, the US patent database, and
web document samples from Yahoo!. We discuss
applications where our system can improve searching and
filtering capabilities.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Roy:1998:GCO,
author = "Prasan Roy and S. Seshadri and Abraham Silberschatz
and S. Sudarshan and S. Ashwin",
title = "Garbage Collection in Object-Oriented Databases Using
Transactional Cyclic Reference Counting",
journal = j-VLDB-J,
volume = "7",
number = "3",
pages = "179--193",
month = aug,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:47 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Ashwin:S=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Roy:Prasan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Seshadri:S=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Silberschatz:Abraham.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sudarshan:S=.html;
http://link.springer.de/link/service/journals/00778/bibs/8007003/80070179.htm;
http://link.springer.de/link/service/journals/00778/papers/8007003/80070179.pdf",
abstract = "Garbage collection is important in object-oriented
databases to free the programmer from explicitly
deallocating memory. In this paper, we present a
garbage collection algorithm, called Transactional
Cyclic Reference Counting (TCRC), for object-oriented
databases. The algorithm is based on a variant of a
reference-counting algorithm proposed for functional
programming languages The algorithm keeps track of
auxiliary reference count information to detect and
collect cyclic garbage. The algorithm works correctly
in the presence of concurrently running transactions,
and system failures. It does not obtain any long-term
locks, thereby minimizing interference with transaction
processing. It uses recovery subsystem logs to detect
pointer updates; thus, existing code need not be
rewritten. Finally, it exploits schema information, if
available, to reduce costs. We have implemented the
TCRC algorithm and present results of a performance
study of the implementation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Ng:1998:IRM,
author = "Wee Teck Ng and Peter M. Chen",
title = "Integrating Reliable Memory in Databases",
journal = j-VLDB-J,
volume = "7",
number = "3",
pages = "194--204",
month = aug,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:47 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chen:Peter_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Ng:Wee_Teck.html;
http://link.springer.de/link/service/journals/00778/bibs/8007003/80070194.htm;
http://link.springer.de/link/service/journals/00778/papers/8007003/80070194.pdf",
abstract = "Recent results in the Rio project at the University of
Michigan show that it is possible to create an area of
main memory that is as safe as disk from operating
system crashes. This paper explores how to integrate
the reliable memory provided by the Rio file cache into
a database system. Prior studies have analyzed the
performance benefits of reliable memory; we focus
instead on how different designs affect reliability. We
propose three designs for integrating reliable memory
into databases: non-persistent database buffer cache,
persistent database buffer cache, and persistent
database buffer cache with protection. Non-persistent
buffer caches use an I/O interface to reliable memory
and require the fewest modifications to existing
databases. However, they waste memory capacity and
bandwidth due to double buffering. Persistent buffer
caches use a memory interface to reliable memory by
mapping it into the database address space. This places
reliable memory under complete database control and
eliminates double buffering, but it may expose the
buffer cache to database errors. Our third design
reduces this exposure by write protecting the buffer
pages. Extensive fault tests show that mapping reliable
memory into the database address space does not
significantly hurt reliability. This is because wild
stores rarely touch dirty, committed pages written by
previous transactions. As a result, we believe that
databases should use a memory interface to reliable
memory.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "main memory database system (MMDB); recovery;
reliability",
}
@Article{Ozsu:1998:I,
author = "M. Tamer {\"O}zsu and Stavros Christodoulakis",
title = "Introduction",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "205--205",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:48 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Ozsu:1998:SIM,
author = "M. Tamer {\"O}zsu and Stavros Christodoulakis",
title = "Special Issue on Multimedia Databases: Introduction",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "205--205",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 10:11:57 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007004.htm;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/=/=Ouml=zsu:M=_Tamer.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Christodoulakis:Stavros.html;
http://link.springer.de/link/service/journals/00778/bibs/8007004/80070205.htm;
http://link.springer.de/link/service/journals/00778/papers/8007004/80070205.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Garofalakis:1998:PRS,
author = "Minos N. Garofalakis and Banu {\"O}zden and Avi
Silberschatz",
title = "On Periodic Resource scheduling for Continuous-Media
Databases",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "206--225",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 10:11:57 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007004.htm;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/=/=Ouml=zden:Banu.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Garofalakis:Minos_N=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Silberschatz:Abraham.html;
http://link.springer.de/link/service/journals/00778/bibs/8007004/80070206.htm;
http://link.springer.de/link/service/journals/00778/papers/8007004/80070206.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
xxauthor = "Minos N. Garofalakis and Banu {\"O}zden and Abraham
Silberschatz",
}
@Article{Jiang:1998:STC,
author = "Haitao Jiang and Ahmed K. Elmagarmid",
title = "Spatial and Temporal Content-Based Access to
Hypervideo Databases",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "226--238",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:48 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/e/Elmagarmid:Ahmed_K=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/j/Jiang:Haitao.html;
http://link.springer.de/link/service/journals/00778/bibs/8007004/80070226.htm;
http://link.springer.de/link/service/journals/00778/papers/8007004/80070226.pdf",
abstract = "Providing content-based video query, retrieval and
browsing is the most important goal of a video database
management system (VDBMS). Video data is unique not
only in terms of its spatial and temporal
characteristics, but also in the semantic associations
manifested by the entities present in the video. This
paper introduces a novel video data model called {\em
Logical Hypervideo Data Model}. In addition to
multilevel video abstractions, the model is capable of
representing video entities that users are interested
in (defined as {\em hot objects\/}) and their semantic
associations with other logical video abstractions,
including hot objects themselves. The semantic
associations are modeled as {\em video hyperlinks\/}
and video data with such property are called {\em
hypervideo}. Video hyperlinks provide a flexible and
effective way of browsing video data. Based on the
proposed model, video queries can be specified with
both temporal and spatial constraints, as well as with
semantic descriptions of the video data. The
characteristics of hot objects' spatial and temporal
relations and efficient evaluation of them are also
discussed. Some query examples are given to demonstrate
the expressiveness of the video data model and query
language. Finally, we describe a modular video database
system architecture that our web-based prototype is
based on.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "content-based query; hot object; hypervideo; spatial
and temporal constraint; video database",
}
@Article{Ng:1998:OCO,
author = "Raymond T. Ng and Paul Shum",
title = "Optimal Clip Ordering for Multi-Clip Queries",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "239--252",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:48 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Ng:Raymond_T=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Shum:Paul.html;
http://link.springer.de/link/service/journals/00778/bibs/8007004/80070239.htm;
http://link.springer.de/link/service/journals/00778/papers/8007004/80070239.pdf",
abstract = "A multi-clip query requests multiple video clips be
returned as the answer of the query. In many
applications and situations, the order in which these
clips are to be delivered does not matter that much to
the user. This allows the system ample opportunities to
optimize system throughput by using schedules that
maximize the effect of piggybacking. In this paper, we
study how to find such optimal schedules. In
particular, we consider two optimization criteria: (i)
one based on maximizing the number of piggybacked
clips, and (ii) the other based on maximizing the
impact on buffer space. We show that the optimal
schedule under the first criterion is equivalent to a
maximum matching in a suitably defined bipartite graph,
and that under the second criterion, the optimal
schedule is equivalent to a maximum matching in a
suitably defined weighted bipartite graph. Our
experimental results, which are based on realistic
distributions, indicate that both kinds of optimal
schedules can lead to a gain in throughput of over
300\%. And yet the time taken to compute such an
optimal schedule is negligible. Finally, we show how to
deal with clips that are variable in length.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "admission control; bipartite graph matching;
performance of multimedia systems",
}
@Article{Soffer:1998:ISI,
author = "Aya Soffer and Hanan Samet",
title = "Integrating Symbolic Images into a Multimedia Database
System Using Classification and Abstraction
Approaches",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "253--274",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:48 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Samet:Hanan.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Soffer:Aya.html;
http://link.springer.de/link/service/journals/00778/bibs/8007004/80070253.htm;
http://link.springer.de/link/service/journals/00778/papers/8007004/80070253.pdf",
abstract = "Symbolic images are composed of a finite set of
symbols that have a semantic meaning. Examples of
symbolic images include maps (where the semantic
meaning of the symbols is given in the legend),
engineering drawings, and floor plans. Two approaches
for supporting queries on symbolic-image databases that
are based on image content are studied. The
classification approach preprocesses all symbolic
images and attaches a semantic classification and an
associated certainty factor to each object that it
finds in the image. The abstraction approach describes
each object in the symbolic image by using a vector
consisting of the values of some of its features (e.g.,
shape, genus, etc.). The approaches differ in the way
in which responses to queries are computed. In the
classification approach, images are retrieved on the
basis of whether or not they contain objects that have
the same classification as the objects in the query. On
the other hand, in the abstraction approach, retrieval
is on the basis of similarity of feature vector values
of these objects. Methods of integrating these two
approaches into a relational multimedia database
management system so that symbolic images can be stored
and retrieved based on their content are described.
Schema definitions and indices that support query
specifications involving spatial as well as contextual
constraints are presented. Spatial constraints may be
based on both locational information (e.g., distance)
and relational information (e.g., north of). Different
strategies for image retrieval for a number of typical
queries using these approaches are described. Estimated
costs are derived for these strategies. Results are
reported of a comparative study of the two approaches
in terms of image insertion time, storage space,
retrieval accuracy, and retrieval time.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "image indexing; multimedia databases; query
optimization; retrieval by content; spatial databases;
symbolic-image databases",
}
@Article{Zezula:1998:ASR,
author = "Pavel Zezula and Pasquale Savino and Giuseppe Amato
and Fausto Rabitti",
title = "Approximate Similarity Retrieval with {M}-Trees",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "275--293",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:48 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Amato:Giuseppe.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Rabitti:Fausto.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Savino:Pasquale.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Zezula:Pavel.html;
http://link.springer.de/link/service/journals/00778/bibs/8007004/80070275.htm;
http://link.springer.de/link/service/journals/00778/papers/8007004/80070275.pdf",
abstract = "Motivated by the urgent need to improve the efficiency
of similarity queries, approximate similarity retrieval
is investigated in the environment of a metric tree
index called the M-tree. Three different approximation
techniques are proposed, which show how to forsake
query precision for improved performance. Measures are
defined that can quantify the improvements in
performance efficiency and the quality of
approximations. The proposed approximation techniques
are then tested on various synthetic and real-life
files. The evidence obtained from the experiments
confirms our hypothesis that a high-quality
approximated similarity search can be performed at a
much lower cost than that needed to obtain the exact
results. The proposed approximation techniques are
scalable and appear to be independent of the metric
used. Extensions of these techniques to the
environments of other similarity search indexes are
also discussed.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access structures; approximation algorithms; distance
only data; performance evaluation; similarity search",
}
@Article{Balkir:1998:DPM,
author = "Nevzat Hurkan Balkir and Gultekin {\"O}zsoyoglu",
title = "Delivering Presentations from Multimedia Servers",
journal = j-VLDB-J,
volume = "7",
number = "4",
pages = "294--307",
month = dec,
year = "1998",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:48 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb7.html;
http://link.springer.de/link/service/journals/00778/tocs/t8007004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/=/=Ouml=zsoyoglu:Gultekin.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Balkir:Nevzat_Hurkan.html;
http://link.springer.de/link/service/journals/00778/bibs/8007004/80070294.htm;
http://link.springer.de/link/service/journals/00778/papers/8007004/80070294.pdf",
abstract = "Most multimedia servers reported in the literature are
designed to serve multiple and independent video/audio
streams. We think that, in future, multimedia servers
will also serve complete presentations. Multimedia
presentations provide unique opportunities to develop
algorithms for buffer management and admission control,
as execution-time consumption requirements of
presentations are known a priori. In this paper, we
examine presentations in three different domains
(heavyweight, middleweight, and lightweight) and
provide buffer management and admission control
algorithms for the three domains. We propose two
improvements (flattening and dynamic-adjustments) on
the schedules created for the heavyweight
presentations. Results from a simulation environment
are presented.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "admission control; buffer management; flattening;
multimedia presentations",
}
@Article{Li:1999:FJU,
author = "Zhe Li and Kenneth A. Ross",
title = "Fast Joins Using Join Indices",
journal = j-VLDB-J,
volume = "8",
number = "1",
pages = "1--24",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:49 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Li:Zhe.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Ross:Kenneth_A=.html;
http://link.springer.de/link/service/journals/00778/bibs/9008001/90080001.htm;
http://link.springer.de/link/service/journals/00778/papers/9008001/90080001.pdf",
abstract = "Two new algorithms, ``Jive join'' and ``Slam join,''
are proposed for computing the join of two relations
using a join index. The algorithms are duals: Jive join
range-partitions input relation tuple ids and then
processes each partition, while Slam join forms ordered
runs of input relation tuple ids and then merges the
results. Both algorithms make a single sequential pass
through each input relation, in addition to one pass
through the join index and two passes through a
temporary file, whose size is half that of the join
index. Both algorithms require only that the number of
blocks in main memory is of the order of the square
root of the number of blocks in the smaller relation.
By storing intermediate and final join results in a
vertically partitioned fashion, our algorithms need to
manipulate less data in memory at a given time than
other algorithms. The algorithms are resistant to data
skew and adaptive to memory fluctuations. Selection
conditions can be incorporated into the algorithms.
Using a detailed cost model, the algorithms are
analyzed and compared with competing algorithms. For
large input relations, our algorithms perform
significantly better than Valduriez's algorithm, the
TID join algorithm, and hash join algorithms. An
experimental study is also conducted to validate the
analytical results and to demonstrate the performance
characteristics of each algorithm in practice.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "decision support systems; query processing",
remark = "Check month: April or May??",
}
@Article{Harder:1999:IPS,
author = "Theo H{\"a}rder and G{\"u}nter Sauter and Joachim
Thomas",
title = "The Intrinsic Problems of Structural Heterogeneity and
an Approach to Their Solution",
journal = j-VLDB-J,
volume = "8",
number = "1",
pages = "25--43",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:49 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/H=auml=rder:Theo.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sauter:G=uuml=nter.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Thomas:Joachim.html;
http://link.springer.de/link/service/journals/00778/bibs/9008001/90080025.htm;
http://link.springer.de/link/service/journals/00778/papers/9008001/90080025.pdf",
abstract = "This paper focuses on the problems that arise when
integrating data from heterogeneous sources in a
single, unified database view. At first, we give a
detailed analysis of the kinds of structural
heterogeneity that occur when unified views are derived
from different database systems. We present the results
in a multiple tier architecture which distinguishes
different levels of heterogeneity and relates them to
their underlying causes as well as to the mapping
conflicts resulting from the view derivation process.
As the second essential contribution, the paper
presents our approach to a mapping language solving the
identified conflicts. The main characteristics of the
language are its descriptiveness, its capability to map
between schemas written in the relational,
object-oriented, ER, or EXPRESS data model, and its
facilities for specifying user-defined update
operations on the view that are to be propagated to the
data sources. Finally, we briefly discuss how this
mapping information is employed to convert queries
formulated with respect to the integrated view, into
database operations over the heterogeneous data
sources.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "heterogeneity; legacy systems; mapping language;
schema integration; schema mapping; updatable views",
remark = "Check month: April or May??",
}
@Article{Huang:1999:CTP,
author = "Yueh-Min Huang and Jen-Wen Ding and Shiao-Li Tsao",
title = "Constant Time Permutation: An Efficient Block
Allocation Strategy for Variable-Bit-Rate Continuous
Media Data",
journal = j-VLDB-J,
volume = "8",
number = "1",
pages = "44--54",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:49 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/Ding:Jen=Wen.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Huang:Yueh=Min.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tsao:Shiao=Li.html;
http://link.springer.de/link/service/journals/00778/bibs/9008001/90080044.htm;
http://link.springer.de/link/service/journals/00778/papers/9008001/90080044.pdf",
abstract = "To provide high accessibility of continuous-media (CM)
data, CM servers generally stripe data across multiple
disks. Currently, the most widely used striping scheme
for CM data is round-robin permutation (RRP).
Unfortunately, when RRP is applied to variable-bit-rate
(VBR) CM data, load imbalance across multiple disks
occurs, thereby reducing overall system performance. In
this paper, the performance of a VBR CM server with RRP
is analyzed. In addition, we propose an efficient
striping scheme called constant time permutation (CTP),
which takes the VBR characteristic into account and
obtains a more balanced load than RRP. Analytic models
of both RRP and CTP are presented, and the models are
verified via trace-driven simulations. Analysis and
simulation results show that CTP can substantially
increase the number of clients supported, though it
might introduce a few seconds/minutes of initial
delay.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "continuous-media server; data placement; load
balancing; striping; video-on-demand (VOD)",
remark = "Check month: April or May??",
}
@Article{Kabra:1999:OOO,
author = "Navin Kabra and David J. DeWitt",
title = "{OPT++}: an object-oriented implementation for
extensible database query optimization",
journal = j-VLDB-J,
volume = "8",
number = "1",
pages = "55--78",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:49 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/d/DeWitt:David_J=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kabra:Navin.html;
http://link.springer.de/link/service/journals/00778/bibs/9008001/90080055.htm;
http://link.springer.de/link/service/journals/00778/papers/9008001/90080055.pdf",
abstract = "In this paper we describe the design and
implementation of OPT++, a tool for extensible database
query optimization that uses an object-oriented design
to simplify the task of implementing, extending, and
modifying an optimizer. Building an optimizer using
OPT++ makes it easy to extend the query algebra (to add
new query algebra operators and physical implementation
algorithms to the system), easy to change the search
space, and also to change the search strategy.
Furthermore, OPT++ comes equipped with a number of
search strategies that are available for use by an
optimizer-implementor. OPT++ considerably simplifies
both, the task of implementing an optimizer for a new
database system, and the task of evaluating alternative
optimization techniques and strategies to decide what
techniques are best suited for that database system. We
present the results of a series of performance studies.
These results validate our design and show that, in
spite of its flexibility, OPT++ can be used to build
efficient optimizers.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "extensibility; object-relational databases; query
optimization; software architecture",
remark = "Check month: April or May??",
}
@Article{Krivokapic:1999:DDD,
author = "Natalija Krivokapi{\'c} and Alfons Kemper and Ehud
Gudes",
title = "Deadlock Detection in Distributed Database Systems:
a New Algorithm and a Comparative Performance
Analysis",
journal = j-VLDB-J,
volume = "8",
number = "2",
pages = "79--100",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:50 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Gudes:Ehud.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kemper:Alfons.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Krivokapic:Natalija.html;
http://link.springer.de/link/service/journals/00778/bibs/9008002/90080079.htm;
http://link.springer.de/link/service/journals/00778/papers/9008002/90080079.pdf",
abstract = "This paper attempts a comprehensive study of deadlock
detection in distributed database systems. First, the
two predominant deadlock models in these systems and
the four different distributed deadlock detection
approaches are discussed. Afterwards, a new deadlock
detection algorithm is presented. The algorithm is
based on dynamically creating {\em deadlock detection
agents\/} (DDAs), each being responsible for detecting
deadlocks in one connected component of the global
wait-for-graph (WFG). The DDA scheme is a
``self-tuning'' system: after an initial warm-up phase,
dedicated DDAs will be formed for ``centers of
locality'', i.e., parts of the system where many
conflicts occur. A dynamic shift in locality of the
distributed system will be responded to by
automatically creating new DDAs while the obsolete ones
terminate. In this paper, we also compare the most
competitive representative of each class of algorithms
suitable for distributed database systems based on a
simulation model, and point out their relative
strengths and weaknesses. The extensive experiments we
carried out indicate that our newly proposed deadlock
detection algorithm outperforms the other algorithms in
the vast majority of configurations and workloads and,
in contrast to all other algorithms, is very robust
with respect to differing load and access profiles.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "comparative performance analysis; deadlock detection;
distributed database systems; simulation study",
remark = "Check month: April or May??",
}
@Article{Boncz:1999:MPQ,
author = "Peter A. Boncz and Martin L. Kersten",
title = "{MIL} primitives for querying a fragmented world",
journal = j-VLDB-J,
volume = "8",
number = "2",
pages = "101--119",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:50 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Boncz:Peter_A=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kersten:Martin_L=.html;
http://link.springer.de/link/service/journals/00778/bibs/9008002/90080101.htm;
http://link.springer.de/link/service/journals/00778/papers/9008002/90080101.pdf",
abstract = "In query-intensive database application areas, like
decision support and data mining, systems that use
vertical fragmentation have a significant performance
advantage. In order to support relational or object
oriented applications on top of such a fragmented data
model, a flexible yet powerful intermediate language is
needed. This problem has been successfully tackled in
Monet, a modern extensible database kernel developed by
our group. We focus on the design choices made in the
Monet interpreter language (MIL), its algebraic query
language, and outline how its concept of tactical
optimization enhances and simplifies the optimization
of complex queries. Finally, we summarize the
experience gained in Monet by creating a highly
efficient implementation of MIL.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database systems; main-memory techniques; query
languages; query optimization; vertical fragmentation",
remark = "Check month: April or May??",
}
@Article{Aslan:1999:SHR,
author = "Goksel Aslan and Dennis McLeod",
title = "Semantic Heterogeneity Resolution in Federated
Databases by Metadata Implantation and Stepwise
Evolution",
journal = j-VLDB-J,
volume = "8",
number = "2",
pages = "120--132",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:50 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Aslan:Goksel.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/McLeod:Dennis.html;
http://link.springer.de/link/service/journals/00778/bibs/9008002/90080120.htm;
http://link.springer.de/link/service/journals/00778/papers/9008002/90080120.pdf",
abstract = "A key aspect of interoperation among data-intensive
systems involves the mediation of metadata and
ontologies across database boundaries. One way to
achieve such mediation between a local database and a
remote database is to fold remote metadata into the
local metadata, thereby creating a common platform
through which information sharing and exchange becomes
possible. Schema implantation and semantic evolution,
our approach to the metadata folding problem, is a
partial database integration scheme in which remote and
local (meta)data are integrated in a stepwise manner
over time. We introduce metadata implantation and
stepwise evolution techniques to interrelate database
elements in different databases, and to resolve
conflicts on the structure and semantics of database
elements (classes, attributes, and individual
instances). We employ a semantically rich canonical
data model, and an incremental integration and semantic
heterogeneity resolution scheme. In our approach,
relationships between local and remote information
units are determined whenever enough knowledge about
their semantics is acquired. The metadata folding
problem is solved by implanting remote database
elements into the local database, a process that
imports remote database elements into the local
database environment, hypothesizes the relevance of
local and remote classes, and customizes the
organization of remote metadata. We have implemented a
prototype system and demonstrated its use in an
experimental neuroscience environment.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database integration; database interoperability;
federated databases; schema evolution; semantic
heterogeneity resolution",
remark = "Check month: April or May??",
}
@Article{Law:1999:ESI,
author = "Kelvin K. W. Law and John C. S. Lui and Leana
Golubchik",
title = "Efficient Support for Interactive Service in
Multi-Resolution {VOD} Systems",
journal = j-VLDB-J,
volume = "8",
number = "2",
pages = "133--153",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:50 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t9008002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Golubchik:Leana.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Law:Kelvin_K=_W=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lui:John_C=_S=.html;
http://link.springer.de/link/service/journals/00778/bibs/9008002/90080133.htm;
http://link.springer.de/link/service/journals/00778/papers/9008002/90080133.pdf",
abstract = "Advances in high-speed networks and multimedia
technologies have made it feasible to provide
video-on-demand (VOD) services to users. However, it is
still a challenging task to design a cost-effective VOD
system that can support a large number of clients (who
may have different quality of service (QoS)
requirements) and, at the same time, provide different
types of VCR functionalities. Although it has been
recognized that VCR operations are important
functionalities in providing VOD service, techniques
proposed in the past for providing VCR operations may
require additional system resources, such as extra disk
I/O, additional buffer space, as well as network
bandwidth. In this paper, we consider the design of a
VOD storage server that has the following features: (1)
provision of different levels of display resolutions to
users who have different QoS requirements, (2)
provision of different types of VCR functionalities,
such as fast forward and rewind, without imposing
additional demand on the system buffer space, I/O
bandwidth, and network bandwidth, and (3) guarantees of
the load-balancing property across all disks during
normal and VCR display periods. The above-mentioned
features are especially important because they simplify
the design of the buffer space, I/O, and network
resource allocation policies of the VOD storage system.
The load-balancing property also ensures that no single
disk will be the bottleneck of the system. In this
paper, we propose data block placement, admission
control, and I/O-scheduling algorithms, as well as
determine the corresponding buffer space requirements
of the proposed VOD storage system. We show that the
proposed VOD system can provide VCR and
multi-resolution services to the viewing clients and at
the same time maintain the load-balancing property.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "interactive services; multi-resolution services;
multimedia servers; VOD systems",
remark = "Check month: April or May??",
}
@Article{Shmueli:2000:FVP,
author = "O. Shmueli and J. Widom",
title = "Foreword by the {VLDB} `98 {PC Chairmen}",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "155--155",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Widom:2000:BPV,
author = "Jennifer Widom and Oded Shmueli",
title = "Best Papers of {VLDB `98, New York: Foreword by the
VLDB `98 PC Chairmen: Best Papers of VLDB `98}",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "155--155",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 10:11:55 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Shmueli:Oded.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Widom:Jennifer.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080155.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080155.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
xxauthor = "O. Shmueli and J. Widom",
}
@Article{Braumandl:2000:FJP,
author = "Reinhard Braumandl and Jens Clau{\ss}en and Alfons
Kemper and Donald Kossmann",
title = "Functional-Join Processing",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "156--177",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Braumandl:Reinhard.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Clau=szlig=en:Jens.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kemper:Alfons.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kossmann:Donald.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080156.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080156.pdf",
abstract = "Inter-object references are one of the key concepts of
object-relational and object-oriented database systems.
In this work, we investigate alternative techniques to
implement inter-object references and make the best use
of them in query processing, i.e., in evaluating
functional joins. We will give a comprehensive overview
and performance evaluation of all known techniques for
simple (single-valued) as well as multi-valued
functional joins. Furthermore, we will describe special
{\em order-preserving\/\/} functional-join techniques
that are particularly attractive for decision support
queries that require ordered results. While most of the
presentation of this paper is focused on
object-relational and object-oriented database systems,
some of the results can also be applied to plain
relational databases because {\em index nested-loop
joins\/\/} along key/foreign-key relationships, as they
are frequently found in relational databases, are just
one particular way to execute a functional join.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "functional join; logical OID; object identifier;
order-preserving join; physical OID; pointer join;
query processing",
}
@Article{George:2000:SBF,
author = "Binto George and Jayant R. Haritsa",
title = "Secure Buffering in Firm Real-Time Database Systems",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "178--198",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/George:Binto.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/h/Haritsa:Jayant_R=.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080178.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080178.pdf",
abstract = "Many real-time database applications arise in
electronic financial services, safety-critical
installations and military systems where enforcing is
crucial to the success of the enterprise. We
investigate here the performance implications, in terms
of killed transactions, of guaranteeing {\em
multi-level secrecy\/} in a real-time database system
supporting applications with {\em firm\/} deadlines. In
particular, we focus on the {\em buffer management\/}
aspects of this issue. Our main contributions are the
following. First, we identify the importance and
difficulties of providing secure buffer management in
the real-time database environment. Second, we present
, a novel buffer management algorithm that provides
{\em covert-channel-free\/} security. SABRE employs a
fully dynamic one-copy allocation policy for efficient
usage of buffer resources. It also incorporates several
optimizations for reducing the overall number of killed
transactions and for decreasing the unfairness in the
distribution of killed transactions across security
levels. Third, using a detailed simulation model, the
real-time performance of SABRE is evaluated against
unsecure conventional and real-time buffer management
policies for a variety of security-classified
transaction workloads and system configurations. Our
experiments show that SABRE provides security with only
a modest drop in real-time performance. Finally, we
evaluate SABRE's performance when augmented with the
GUARD adaptive admission control policy. Our
experiments show that this combination provides close
to ideal fairness for real-time applications that can
tolerate covert-channel bandwidths of up to one bit per
second (a limit specified in military standards).",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "buffer management; covert channels; firm deadlines;
real-time database",
}
@Article{Muth:2000:LLS,
author = "Peter Muth and Patrick E. O'Neil and Achim Pick and
Gerhard Weikum",
title = "The {LHAM} Log-Structured History Data Access Method",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "199--221",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Muth:Peter.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/o/O=Neil:Patrick_E=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Pick:Achim.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/w/Weikum:Gerhard.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080199.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080199.pdf",
abstract = "Numerous applications such as stock market or medical
information systems require that both historical and
current data be logically integrated into a temporal
database. The underlying access method must support
different forms of ``time-travel'' queries, the
migration of old record versions onto inexpensive
archive media, and high insertion and update rates.
This paper presents an access method for
transaction-time temporal data, called the
log-structured history data access method (LHAM) that
meets these demands. The basic principle of LHAM is to
partition the data into successive components based on
the timestamps of the record versions. Components are
assigned to different levels of a storage hierarchy,
and incoming data is continuously migrated through the
hierarchy. The paper discusses the LHAM concepts,
including concurrency control and recovery, our
full-fledged LHAM implementation, and experimental
performance results based on this implementation. A
detailed comparison with the TSB-tree, both
analytically and based on experiments with real
implementations, shows that LHAM is highly superior in
terms of insert performance, while query performance is
in almost all cases at least as good as for the
TSB-tree; in many cases it is much better.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data warehouses; index structures; performance;
storage systems; temporal databases",
}
@Article{Gibson:2000:CCD,
author = "David Gibson and Jon M. Kleinberg and Prabhakar
Raghavan",
title = "Clustering Categorical Data: An Approach Based on
Dynamical Systems",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "222--236",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Gibson:David.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kleinberg:Jon_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Raghavan:Prabhakar.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080222.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080222.pdf",
abstract = "We describe a novel approach for clustering
collections of sets, and its application to the
analysis and mining of categorical data. By
``categorical data,'' we mean tables with fields that
cannot be naturally ordered by a metric --- e.g., the
names of producers of automobiles, or the names of
products offered by a manufacturer. Our approach is
based on an iterative method for assigning and
propagating weights on the categorical values in a
table; this facilitates a type of similarity measure
arising from the co-occurrence of values in the
dataset. Our techniques can be studied analytically in
terms of certain types of non-linear dynamical
systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "categorical data; clustering; data mining; dynamical
systems; hypergraphs",
}
@Article{Knorr:2000:DBO,
author = "Edwin M. Knorr and Raymond T. Ng and Vladimir
Tucakov",
title = "Distance-Based Outliers: Algorithms and Applications",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "237--253",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Knorr:Edwin_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/n/Ng:Raymond_T=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tucakov:V=.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080237.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080237.pdf",
abstract = "This paper deals with finding outliers (exceptions) in
large, multidimensional datasets. The identification of
outliers can lead to the discovery of truly unexpected
knowledge in areas such as electronic commerce, credit
card fraud, and even the analysis of performance
statistics of professional athletes. Existing methods
that we have seen for finding outliers can only deal
efficiently with two dimensions/attributes of a
dataset. In this paper, we study the notion of {\em
DB\/} ({\em distance-based\/}) outliers. Specifically,
we show that (i) outlier detection can be done {\em
efficiently\/} for {\em large\/} datasets, and for
$k$-dimensional datasets with large values of $k$
(e.g., $k \ge 5$); and (ii), outlier detection is a
{\em meaningful\/} and important knowledge discovery
task. First, we present two simple algorithms, both
having a complexity of $O(k \: N^2)$, $k$ being the
dimensionality and $N$ being the number of objects in
the dataset. These algorithms readily support datasets
with many more than two attributes. Second, we present
an optimized cell-based algorithm that has a complexity
that is linear with respect to $N$, but exponential
with respect to $k$. We provide experimental results
indicating that this algorithm significantly
outperforms the two simple algorithms for $k \leq 4$.
Third, for datasets that are mainly disk-resident, we
present another version of the cell-based algorithm
that guarantees at most three passes over a dataset.
Again, experimental results show that this algorithm is
by far the best for $k \leq 4$. Finally, we discuss our
work on three real-life applications, including one on
spatio-temporal data (e.g., a video surveillance
application), in order to confirm the relevance and
broad applicability of {\em DB\/} outliers.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "algorithms; data mining; data mining applications;
outliers\slash exceptions",
}
@Article{Korn:2000:QDM,
author = "Flip Korn and Alexandros Labrinidis and Yannis Kotidis
and Christos Faloutsos",
title = "Quantifiable Data Mining Using Ratio Rules",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "254--266",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Faloutsos:Christos.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Korn:Flip.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/k/Kotidis:Yannis.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Labrinidis:Alexandros.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080254.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080254.pdf",
abstract = "Association Rule Mining algorithms operate on a data
matrix (e.g., customers $\times$ products) to derive
association rules [AIS93b, SA96]. We propose a new
paradigm, namely, {\em Ratio Rules}, which are
quantifiable in that we can measure the ``goodness'' of
a set of discovered rules. We also propose the
``guessing error'' as a measure of the ``goodness'',
that is, the root-mean-square error of the
reconstructed values of the cells of the given matrix,
when we pretend that they are unknown. Another
contribution is a novel method to guess missing/hidden
values from the Ratio Rules that our method derives.
For example, if somebody bought $10 of milk and $3 of
bread, our rules can ``guess'' the amount spent on
butter. Thus, unlike association rules, Ratio Rules can
perform a variety of important tasks such as
forecasting, answering ``what-if'' scenarios, detecting
outliers, and visualizing the data. Moreover, we show
that we can compute Ratio Rules in a {\em single\/}
pass over the data set with small memory requirements
(a few small matrices), in contrast to association rule
mining methods which require multiple passes and/or
large memory. Experiments on several real data sets
(e.g., basketball and baseball statistics, biological
data) demonstrate that the proposed method: (a) leads
to rules that make sense; (b) can find large itemsets
in binary matrices, even in the presence of noise; and
(c) consistently achieves a ``guessing error'' of up to
5 times less than using straightforward column
averages.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data mining; forecasting; guessing error; knowledge
discovery",
}
@Article{Torp:2000:ETD,
author = "Kristian Torp and Christian S. Jensen and Richard
Thomas Snodgrass",
title = "Effective Timestamping in Databases",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "267--288",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/j/Jensen:Christian_S=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Snodgrass:Richard_T=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Torp:Kristian.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080267.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080267.pdf",
abstract = "Many existing database applications place various
timestamps on their data, rendering temporal values
such as dates and times prevalent in database tables.
During the past two decades, several dozen temporal
data models have appeared, all with timestamps being
integral components. The models have used timestamps
for encoding two specific temporal aspects of database
facts, namely transaction time, when the facts are
current in the database, and valid time, when the facts
are true in the modeled reality. However, with few
exceptions, the assignment of timestamp values has been
considered only in the context of individual
modification statements. This paper takes the next
logical step: It considers the use of timestamping for
capturing transaction and valid time in the context of
transactions. The paper initially identifies and
analyzes several problems with straightforward
timestamping, then proceeds to propose a variety of
techniques aimed at solving these problems.
Timestamping the results of a transaction with the
commit time of the transaction is a promising approach.
The paper studies how this timestamping may be done
using a spectrum of techniques. While many database
facts are valid until {\em now}, the current time, this
value is absent from the existing temporal types.
Techniques that address this problem using different
substitute values are presented. Using a stratum
architecture, the performance of the different proposed
techniques are studied. Although querying and modifying
time-varying data is accompanied by a number of subtle
problems, we present a comprehensive approach that
provides application programmers with simple,
consistent, and efficient support for modifying
bitemporal databases in the context of user
transactions.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "timestamping; transactions",
}
@Article{Sheikholeslami:2000:WWB,
author = "Gholamhosein Sheikholeslami and Surojit Chatterjee and
Aidong Zhang",
title = "{WaveCluster}: a Wavelet Based Clustering Approach
for Spatial Data in Very Large Databases",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "289--304",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chatterjee:Surojit.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sheikholeslami:Gholamhosein.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Zhang:Aidong.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080289.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080289.pdf",
abstract = "Many applications require the management of spatial
data in a multidimensional feature space. Clustering
large spatial databases is an important problem, which
tries to find the densely populated regions in the
feature space to be used in data mining, knowledge
discovery, or efficient information retrieval. A good
clustering approach should be efficient and detect
clusters of arbitrary shape. It must be insensitive to
the noise (outliers) and the order of input data. We
propose {\em WaveCluster}, a novel clustering approach
based on wavelet transforms, which satisfies all the
above requirements. Using the multiresolution property
of wavelet transforms, we can effectively identify
arbitrarily shaped clusters at different degrees of
detail. We also demonstrate that {\em WaveCluster\/} is
highly efficient in terms of time complexity.
Experimental results on very large datasets are
presented, which show the efficiency and effectiveness
of the proposed approach compared to the other recent
clustering methods.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Pacitti:2000:UPS,
author = "Esther Pacitti and Eric Simon",
title = "Update Propagation Strategies to Improve Freshness in
Lazy Master Replicated Databases",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "305--318",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Pacitti:Esther.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Simon:Eric.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080305.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080305.pdf",
abstract = "Many distributed database applications need to
replicate data to improve data availability and query
response time. The two-phase commit protocol guarantees
mutual consistency of replicated data but does not
provide good performance. Lazy replication has been
used as an alternative solution in several types of
applications such as on-line financial transactions and
telecommunication systems. In this case, mutual
consistency is relaxed and the concept of freshness is
used to measure the deviation between replica copies.
In this paper, we propose two update propagation
strategies that improve freshness. Both of them use
immediate propagation: updates to a primary copy are
propagated towards a slave node as soon as they are
detected at the master node without waiting for the
commitment of the update transaction. Our performance
study shows that our strategies can improve data
freshness by up to five times compared with the
deferred approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data replication; distributed databases; performance
evaluation",
}
@Article{Liang:2000:OMD,
author = "Weifa Liang and Maria E. Orlowska and Jeffrey X. Yu",
title = "Optimizing Multiple Dimensional Queries Simultaneously
in Multidimensional Databases",
journal = j-VLDB-J,
volume = "8",
number = "3--4",
pages = "319--338",
month = feb,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:51 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb8.html;
http://link.springer.de/link/service/journals/00778/tocs/t0008003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Liang:Weifa.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/o/Orlowska:Maria_E=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/y/Yu:Jeffrey_X=.html;
http://link.springer.de/link/service/journals/00778/bibs/0008003/00080319.htm;
http://link.springer.de/link/service/journals/00778/papers/0008003/00080319.pdf",
abstract = "Some significant progress related to multidimensional
data analysis has been achieved in the past few years,
including the design of fast algorithms for computing
datacubes, selecting some precomputed group-bys to
materialize, and designing efficient storage structures
for multidimensional data. However, little work has
been carried out on multidimensional query optimization
issues. Particularly the response time (or evaluation
cost) for answering several related dimensional queries
simultaneously is crucial to the OLAP applications.
Recently, Zhao et al. first exploited this problem by
presenting three heuristic algorithms. In this paper we
first consider in detail two cases of the problem in
which all the queries are either hash-based star joins
or index-based star joins only. In the case of the
hash-based star join, we devise a polynomial
approximation algorithm which delivers a plan whose
evaluation cost is $O(n^\epsilon)$ times the optimal,
where $n$ is the number of queries and $\epsilon$ is a
fixed constant with $0 < \epsilon \leq 1$. We also
present an exponential algorithm which delivers a plan
with the optimal evaluation cost. In the case of the
index-based star join, we present a heuristic algorithm
which delivers a plan whose evaluation cost is $n$
times the optimal, and an exponential algorithm which
delivers a plan with the optimal evaluation cost. We
then consider a general case in which both hash-based
star-join and index-based star-join queries are
included. For this case, we give a possible improvement
on the work of Zhao et al., based on an analysis of
their solutions. We also develop another heuristic and
an exact algorithm for the problem. We finally conduct
a performance study by implementing our algorithms. The
experimental results demonstrate that the solutions
delivered for the restricted cases are always within
two times of the optimal, which confirms our
theoretical upper bounds. Actually these experiments
produce much better results than our theoretical
estimates. To the best of our knowledge, this is the
only development of polynomial algorithms for the first
two cases which are able to deliver plans with
deterministic performance guarantees in terms of the
qualities of the plans generated. The previous
approaches including that of [ZDNS98] may generate a
feasible plan for the problem in these two cases, but
they do not provide any performance guarantee, i.e.,
the plans generated by their algorithms can be
arbitrarily far from the optimal one.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data warehousing; MDDBs; multiple dimensional query
optimization; OLAP; query modeling",
}
@Article{Atzeni:2000:DWG,
author = "Paolo Atzeni and Alberto O. Mendelzon",
title = "Databases and the {Web}: Guest Editorial: Databases
and the {Web}",
journal = j-VLDB-J,
volume = "9",
number = "1",
pages = "1--1",
month = mar,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Wed Sep 27 10:11:55 MDT 2000",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009001.htm;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/a/Atzeni:Paolo.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mendelzon:Alberto_O=.html;
http://link.springer.de/link/service/journals/00778/bibs/0009001/00090001.htm;
http://link.springer.de/link/service/journals/00778/papers/0009001/00090001.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Atzeni:2000:GE,
author = "Paolo Atzeni and Alberto O. Mendelzon",
title = "Guest editorial",
journal = j-VLDB-J,
volume = "9",
number = "1",
pages = "1--1",
month = mar,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:52 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Chidlovskii:2000:SCW,
author = "Boris Chidlovskii and Uwe M. Borghoff",
title = "Semantic caching of {Web} queries",
journal = j-VLDB-J,
volume = "9",
number = "1",
pages = "2--17",
month = mar,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:52 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Borghoff:Uwe_M=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chidlovskii:Boris.html;
http://link.springer.de/link/service/journals/00778/bibs/0009001/00090002.htm;
http://link.springer.de/link/service/journals/00778/papers/0009001/00090002.pdf",
abstract = "In meta-searchers accessing distributed Web-based
information repositories, performance is a major issue.
Efficient query processing requires an appropriate
caching mechanism. Unfortunately, standard page-based
as well as tuple-based caching mechanisms designed for
conventional databases are not efficient on the Web,
where keyword-based querying is often the only way to
retrieve data. In this work, we study the problem of
semantic caching of Web queries and develop a caching
mechanism for conjunctive Web queries based on {\em
signature files}. Our algorithms cope with both
relations of semantic containment and intersection
between a query and the corresponding cache items. We
also develop the cache replacement strategy to treat
situations when cached items differ in size and
contribution when providing partial query answers. We
report results of experiments and show how the caching
mechanism is realized in the Knowledge Broker system.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "experiments; query algorithms; region containment;
semantic caching; signature files",
}
@Article{Gruser:2000:LRT,
author = "Jean-Robert Gruser and Louiqa Raschid and Vladimir
Zadorozhny and Tao Zhan",
title = "Learning response time for {WebSources} using query
feedback and application in query optimization",
journal = j-VLDB-J,
volume = "9",
number = "1",
pages = "18--37",
month = mar,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:52 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/g/Gruser:Jean=Robert.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/r/Raschid:Louiqa.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Zadorozhny:Vladimir.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/z/Zhan:Tao.html;
http://link.springer.de/link/service/journals/00778/bibs/0009001/00090018.htm;
http://link.springer.de/link/service/journals/00778/papers/0009001/00090018.pdf",
abstract = "The rapid growth of the Internet and support for
interoperability protocols has increased the number of
Web accessible sources, WebSources. Current wrapper
mediator architectures need to be extended with a
wrapper cost model (WCM) for WebSources that can
estimate the response time (delays) to access sources
as well as other relevant statistics. In this paper, we
present a Web prediction tool (WebPT), a tool that is
based on learning using query feedback from WebSources.
The WebPT uses dimensions time of day, day, and
quantity of data, to learn response times from a
particular WebSource, and to predict the expected
response time (delay) for some query. Experiment data
was collected from several sources, and those
dimensions that were significant in estimating the
response time were determined. We then trained the
WebPT on the collected data, to use the three
dimensions mentioned above, and to predict the response
time, as well as a confidence in the prediction. We
describe the WebPT learning algorithms, and report on
the WebPT learning for WebSources. Our research shows
that we can improve the quality of learning by tuning
the WebPT features, e.g., training the WebPT using a
logarithm of the input training data; including
significant dimensions in the WebPT; or changing the
ordering of dimensions. A comparison of the WebPT with
more traditional neural network (NN) learning has been
performed, and we briefly report on the comparison. We
then demonstrate how the WebPT prediction of delay may
be used by a scrambling enabled optimizer. A scrambling
algorithm identifies some critical points of delay,
where it makes a decision to scramble (modify) a plan,
to attempt to hide the expected delay by computing some
other part of the plan that is unaffected by the delay.
We explore the space of real delay at a WebSource,
versus the WebPT prediction of this delay, with respect
to critical points of delay in specific plans. We
identify those cases where WebPT overestimation or
underestimation of the real delay results in a penalty
in the scrambling enabled optimizer, and those cases
where there is no penalty. Using the experimental data
and WebPT learning, we test how good the WebPT is in
minimizing these penalties.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data-intensive applications on the Web; query
languages and systems for Web data",
}
@Article{Fernandez:2000:DSW,
author = "Mary Fern{\'a}ndez and Daniela Florescu and Alon Levy
and Dan Suciu",
title = "Declarative specification of {Web} sites with {S}",
journal = j-VLDB-J,
volume = "9",
number = "1",
pages = "38--55",
month = mar,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:52 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Fernandez:Mary_F=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Florescu:Daniela.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Levy:Alon_Y=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Suciu:Dan.html;
http://link.springer.de/link/service/journals/00778/bibs/0009001/00090038.htm;
http://link.springer.de/link/service/journals/00778/papers/0009001/00090038.pdf",
abstract = "S is a system for implementing {\em data-intensive\/}
Web sites, which typically integrate information from
multiple data sources and have complex structure. S's
key idea is separating the management of a Web site's
data, the specification of its content and structure,
and the visual representation of its pages. S provides
a declarative {\em query language\/} for specifying a
site's content and structure, and a simple {\em
template language\/} for specifying a site's HTML
representation. This paper contains a comprehensive
description of the S system and details the benefits of
declarative site specification. We describe our
experiences using S in a production application and
describe three different, but complementary, systems
that extend and improve upon S's original ideas.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "declarative query languages; web-site management",
xxauthor = "Mary F. Fernandez and Daniela Florescu and Alon Y.
Levy and Dan Suciu",
xxtitle = "Declarative Specification of {Web} Sites with
{Strudel}",
}
@Article{Berendt:2000:ANB,
author = "Bettina Berendt and Myra Spiliopoulou",
title = "Analysis of navigation behaviour in {Web} sites
integrating multiple information systems",
journal = j-VLDB-J,
volume = "9",
number = "1",
pages = "56--75",
month = mar,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:52 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Berendt:Bettina.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Spiliopoulou:Myra.html;
http://link.springer.de/link/service/journals/00778/bibs/0009001/00090056.htm;
http://link.springer.de/link/service/journals/00778/papers/0009001/00090056.pdf",
abstract = "The analysis of web usage has mostly focused on sites
composed of conventional static pages. However, huge
amounts of information available in the web come from
databases or other data collections and are presented
to the users in the form of dynamically generated
pages. The query interfaces of such sites allow the
specification of many search criteria. Their generated
results support navigation to pages of results
combining cross-linked data from many sources. For the
analysis of visitor navigation behaviour in such web
sites, we propose the web usage miner (WUM), which
discovers navigation patterns subject to advanced
statistical and structural constraints. Since our
objective is the discovery of interesting navigation
patterns, we do not focus on accesses to individual
pages. Instead, we construct conceptual hierarchies
that reflect the query capabilities used in the
production of those pages. Our experiments with a real
web site that integrates data from multiple databases,
the German SchulWeb, demonstrate the appropriateness of
WUM in discovering navigation patterns and show how
those discoveries can help in assessing and improving
the quality of the site.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "conceptual hierarchies; data mining; query
capabilities; Web databases; Web query interfaces; Web
usage mining",
}
@Article{Buneman:2000:UQL,
author = "Peter Buneman and Mary F. Fernandez and Dan Suciu",
title = "{UnQL}: a query language and algebra for
semistructured data based on structural recursion",
journal = j-VLDB-J,
volume = "9",
number = "1",
pages = "76--110",
month = mar,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:52 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/b/Buneman:Peter.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Fernandez:Mary_F=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Suciu:Dan.html;
http://link.springer.de/link/service/journals/00778/bibs/0009001/00090076.htm;
http://link.springer.de/link/service/journals/00778/papers/0009001/00090076.pdf",
abstract = "This paper presents structural recursion as the basis
of the syntax and semantics of query languages for
semistructured data and XML. We describe a simple and
powerful query language based on pattern matching and
show that it can be expressed using structural
recursion, which is introduced as a top-down, recursive
function, similar to the way XSL is defined on XML
trees. On cyclic data, structural recursion can be
defined in two equivalent ways: as a recursive function
which evaluates the data top-down and remembers all its
calls to avoid infinite loops, or as a bulk evaluation
which processes the entire data in parallel using only
traditional relational algebra operators. The latter
makes it possible for optimization techniques in
relational queries to be applied to structural
recursion. We show that the composition of two
structural recursion queries can be expressed as a
single such query, and this is used as the basis of an
optimization method for mediator systems. Several other
formal properties are established: structural recursion
can be expressed in first-order logic extended with
transitive closure; its data complexity is PTIME; and
over relational data it is a conservative extension of
the relational calculus. The underlying data model is
based on value equality, formally defined with
bisimulation. Structural recursion is shown to be
invariant with respect to value equality.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "optimization; query language; semistructured data;
structural recursion; XML; XSL",
}
@Article{Mirbel:2000:CTI,
author = "Isabelle Mirbel and Barbara Pernici and Timos K.
Sellis and S. Tserkezoglou and Michalis Vazirgiannis",
title = "Checking the Temporal Integrity of Interactive
Multimedia Documents",
journal = j-VLDB-J,
volume = "9",
number = "2",
pages = "111--130",
month = jul,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:53 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Mirbel:Isabelle.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/p/Pernici:Barbara.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Sellis:Timos_K=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/t/Tserkezoglou:S=.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/v/Vazirgiannis:Michalis.html;
http://link.springer.de/link/service/journals/00778/bibs/0009002/00090111.htm;
http://link.springer.de/link/service/journals/00778/papers/0009002/00090111.pdf",
abstract = "When authoring multimedia scenarios, and in particular
scenarios with user interaction, where the sequence and
time of occurrence of interactions is not predefined,
it is difficult to guarantee the consistency of the
resulting scenarios. As a consequence, the {\em
execution\/} of the scenario may result in unexpected
behavior or inconsistent use of media. The present
paper proposes a methodology for checking the temporal
integrity of interactive multimedia document (IMD)
scenarios at authoring time at various levels. The IMD
flow is mainly defined by the events occurring during
the IMD session. Integrity checking consists of a set
of discrete steps, during which we transform the
scenario into temporal constraint networks representing
the constraints linking the different possible events
in the scenario. Temporal constraint verification
techniques are applied to verify the integrity of the
scenario, deriving a minimal network, showing possible
temporal relationships between events given a set of
constraints.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "constraint networks; multimedia presentation; temporal
integrity",
}
@Article{Candan:2000:VMM,
author = "K. Sel{\c{c}}uk Candan and Eric Lemar and V. S.
Subrahmanian",
title = "View management in multimedia databases",
journal = j-VLDB-J,
volume = "9",
number = "2",
pages = "131--153",
month = jul,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:53 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Candan:K=_Sel=ccedil=uk.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/l/Lemar:Eric.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/s/Subrahmanian:V=_S=.html;
http://link.springer.de/link/service/journals/00778/bibs/0009002/00090131.htm;
http://link.springer.de/link/service/journals/00778/papers/0009002/00090131.pdf",
abstract = "Though there has been extensive work on multimedia
databases in the last few years, there is no prevailing
notion of a multimedia view, nor there are techniques
to create, manage, and maintain such views. Visualizing
the results of a dynamic multimedia query or
materializing a dynamic multimedia view corresponds to
assembling and delivering an interactive multimedia
presentation in accordance with the visualization
specifications. In this paper, we suggest that a
non-interactive multimedia presentation is a set of
{\em virtual objects\/} with associated spatial and
temporal presentation constraints. A virtual object is
either an object, or the result of a query. As queries
may have different answers at different points in time,
scheduling the presentation of such objects is
nontrivial. We then develop a probabilistic model of
interactive multimedia presentations, extending the
non-interactive model described earlier. We also
develop a probabilistic model of interactive
visualization where the probabilities reflect the user
profiles, or the likelihood of certain user
interactions. Based on this probabilistic model, we
develop three utility-theoretic based types of
prefetching algorithms that anticipate how users will
interact with the presentation. These prefetching
algorithms allow efficient visualization of the query
results in accordance with the underlying
specification. We have built a prototype system that
incorporates these algorithms. We report on the results
of experiments conducted on top of this
implementation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "interactivity; multimedia databases; prefetching;
result visualization\slash presentation; view
management",
}
@Article{Fu:2000:DVT,
author = "Ada Wai-chee Fu and Polly Mei-shuen Chan and Yin-Ling
Cheung and Yiu Sang Moon",
title = "Dynamic vp-Tree Indexing for $n$-Nearest Neighbor
Search Given Pair-Wise Distances",
journal = j-VLDB-J,
volume = "9",
number = "2",
pages = "154--173",
month = jul,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:53 MDT 2008",
bibsource = "http://ftp.informatik.rwth-aachen.de/dblp/db/journals/vldb/vldb9.html;
http://link.springer.de/link/service/journals/00778/tocs/t0009002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Electronic edition.",
URL = "http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Chan:Polly_Mei=shuen.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/c/Cheung:Yin=Ling.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/f/Fu:Ada_Wai=Chee.html;
http://ftp.informatik.rwth-aachen.de/dblp/db/indices/a-tree/m/Moon:Yiu_Sang.html;
http://link.springer.de/link/service/journals/00778/bibs/0009002/00090154.htm;
http://link.springer.de/link/service/journals/00778/papers/0009002/00090154.pdf",
abstract = "For some multimedia applications, it has been found
that domain objects cannot be represented as feature
vectors in a multidimensional space. Instead, pair-wise
distances between data objects are the only input. To
support content-based retrieval, one approach maps each
object to a $k$dimensional ($k$d) point and tries to
preserve the distances among the points. Then, existing
spatial access index methods such as the R-trees and
KD-trees can support fast searching on the resulting
$k$d points. However, information loss is inevitable
with such an approach since the distances between data
objects can only be preserved to a certain extent. Here
we investigate the use of a distance-based indexing
method. In particular, we apply the vantage point tree
(vp-tree) method. There are two important problems for
the vp-tree method that warrant further investigation,
the $n$nearest neighbors search and the updating
mechanisms. We study an $n$nearest neighbors search
algorithm for the vp-tree, which is shown by
experiments to scale up well with the size of the
dataset and the desired number of nearest neighbors,
$n$. Experiments also show that the searching in the
vp-tree is more efficient than that for the $R^*$-tree
and the $M$-tree. Next, we propose solutions for the
update problem for the vp-tree, and show by experiments
that the algorithms are efficient and effective.
Finally, we investigate the problem of selecting
vantage-point, propose a few alternative methods, and
study their impact on the number of distance
computation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "content-based retrieval; indexing; nearest neighbor
search; pair-wise distances; updating",
}
@Article{Atkinson:2000:GE,
author = "Malcolm P. Atkinson",
title = "Guest editorial",
journal = j-VLDB-J,
volume = "9",
number = "3",
pages = "175--176",
month = dec,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:54 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t0009003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/0009003/00090175.htm;
http://link.springer.de/link/service/journals/00778/papers/0009003/00090175.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Bernstein:2000:CBP,
author = "Philip A. Bernstein and Shankar Pal and David Shutt",
title = "Context-based prefetch --- an optimization for
implementing objects on relations",
journal = j-VLDB-J,
volume = "9",
number = "3",
pages = "177--189",
month = dec,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:54 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t0009003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/0009003/00090177.htm;
http://link.springer.de/link/service/journals/00778/papers/0009003/00090177.pdf",
abstract = "When implementing persistent objects on a relational
database, a major performance issue is prefetching data
to minimize the number of round-trips to the database.
This is especially hard with navigational applications,
since future accesses are unpredictable. We propose the
use of the context in which an object is loaded as a
predictor of future accesses, where a context can be a
stored collection of relationships, a query result, or
a complex object. When an object O's state is loaded,
similar state for other objects in O's context is
prefetched. We present a design for maintaining context
and for using it to guide prefetch. We give performance
measurements of its implementation in Microsoft
Repository, showing up to a 70\% reduction in running
time. We describe several variations of the
optimization: selectively applying the technique based
on application and database characteristics, using
application-supplied performance hints, using
concurrent database queries to support asynchronous
prefetch, prefetching across relationship paths, and
delayed prefetch to save database round-trips.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "caching; object-oriented database; object-relational
mapping; prefetch",
}
@Article{Claussen:2000:EES,
author = "J. Claussen and A. Kemper and D. Kossmann and C.
Wiesner",
title = "Exploiting early sorting and early partitioning for
decision support query processing",
journal = j-VLDB-J,
volume = "9",
number = "3",
pages = "190--213",
month = dec,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:54 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t0009003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/0009003/00090190.htm;
http://link.springer.de/link/service/journals/00778/papers/0009003/00090190.pdf",
abstract = "Decision support queries typically involve several
joins, a grouping with aggregation, and/or sorting of
the result tuples. We propose two new classes of query
evaluation algorithms that can be used to speed up the
execution of such queries. The algorithms are based on
(1) {\em early sorting\/} and (2) {\em early
partitioning\/} --- or a combination of both. The idea
is to push the sorting and/or the partitioning to the
leaves, i.e., the base relations, of the query
evaluation plans (QEPs) and thereby avoid sorting or
partitioning large intermediate results generated by
the joins. Both early sorting and early partitioning
are used in combination with hash-based algorithms for
evaluating the join(s) and the grouping. To enable
early sorting, the sort order generated at an early
stage of the QEP is retained through an arbitrary
number of so-called {\em order-preserving hash joins}.
To make early partitioning applicable to a large class
of decision support queries, we generalize the
so-called hash teams proposed by Graefe et al. [GBC98].
Hash teams allow to perform several hash-based
operations (join and grouping) on the same attribute in
one pass without repartitioning intermediate results.
Our generalization consists of indirectly partitioning
the input data. Indirect partitioning means
partitioning the input data on an attribute that is not
directly needed for the next hash-based operation, and
it involves the construction of bitmaps to approximate
the partitioning for the attribute that is needed in
the next hash-based operation. Our performance
experiments show that such QEPs based on {\em early
sorting, early partitioning}, or both in combination
perform significantly better than conventional
strategies for many common classes of decision support
queries.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "decision support systems; early sorting and
partitioning; hash joins and hash teams; performance
evaluation; query processing and optimization",
}
@Article{Jagadish:2000:ODM,
author = "H. V. Jagadish and Olga Kapitskaia and Raymond T. Ng
and Divesh Srivastava",
title = "One-dimensional and multi-dimensional substring
selectivity estimation",
journal = j-VLDB-J,
volume = "9",
number = "3",
pages = "214--230",
month = dec,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:54 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t0009003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/0009003/00090214.htm;
http://link.springer.de/link/service/journals/00778/papers/0009003/00090214.pdf",
abstract = "With the increasing importance of XML, LDAP
directories, and text-based information sources on the
Internet, there is an ever-greater need to evaluate
queries involving (sub)string matching. In many cases,
matches need to be on multiple attributes/dimensions,
with correlations between the multiple dimensions.
Effective query optimization in this context requires
good selectivity estimates. In this paper, we use
pruned count-suffix trees (PSTs) as the basic data
structure for substring selectivity estimation. For the
1-D problem, we present a novel technique called MO
(Maximal Overlap). We then develop and analyze two 1-D
estimation algorithms, MOC and MOLC, based on MO and a
constraint-based characterization of all possible
completions of a given PST. For the $k$-D problem, we
first generalize PSTs to multiple dimensions and
develop a space- and time-efficient probabilistic
algorithm to construct $k$-D PSTs directly. We then
show how to extend MO to multiple dimensions. Finally,
we demonstrate, both analytically and experimentally,
that MO is both practical and substantially superior to
competing algorithms.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "maximal overlap; pruned count-suffix tree; short
memory property; string selectivity",
}
@Article{Manegold:2000:ODA,
author = "Stefan Manegold and Peter A. Boncz and Martin L.
Kersten",
title = "Optimizing database architecture for the new
bottleneck: memory access",
journal = j-VLDB-J,
volume = "9",
number = "3",
pages = "231--246",
month = dec,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:54 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t0009003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/0009003/00090231.htm;
http://link.springer.de/link/service/journals/00778/papers/0009003/00090231.pdf",
abstract = "In the past decade, advances in the speed of commodity
CPUs have far out-paced advances in memory latency.
Main-memory access is therefore increasingly a
performance bottleneck for many computer applications,
including database systems. In this article, we use a
simple scan test to show the severe impact of this
bottleneck. The insights gained are translated into
guidelines for database architecture, in terms of both
data structures and algorithms. We discuss how
vertically fragmented data structures optimize cache
performance on sequential data access. We then focus on
equi-join, typically a random-access operation, and
introduce radix algorithms for partitioned hash-join.
The performance of these algorithms is quantified using
a detailed analytical model that incorporates memory
access cost. Experiments that validate this model were
performed on the Monet database system. We obtained
exact statistics on events such as TLB misses and L1
and L2 cache misses by using hardware performance
counters found in modern CPUs. Using our cost model, we
show how the carefully tuned memory access pattern of
our radix algorithms makes them perform well, which is
confirmed by experimental results.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "decomposed storage model; implementation techniques;
join algorithms; main-memory databases; memory access
optimization; query processing",
}
@Article{Raman:2000:ODR,
author = "Vijayshankar Raman and Bhaskaran Raman and Joseph M.
Hellerstein",
title = "Online dynamic reordering",
journal = j-VLDB-J,
volume = "9",
number = "3",
pages = "247--260",
month = dec,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:54 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t0009003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/0009003/00090247.htm;
http://link.springer.de/link/service/journals/00778/papers/0009003/00090247.pdf",
abstract = "We present a pipelining, dynamically tunable {\em
reorder\/} operator for providing user control during
long running, data-intensive operations. Users can see
partial results and accordingly direct the processing
by specifying preferences for various data items; data
of interest is prioritized for early processing. The
reordering mechanism is efficient and non-blocking and
can be used over arbitrary data streams from files and
indexes, as well as continuous data feeds. We also
investigate several policies for the reordering based
on the performance goals of various typical
applications. We present performance results for
reordering in the context of an online aggregation
implementation in Informix and in the context of
sorting and scrolling in a large-scale spreadsheet. Our
experiments demonstrate that for a variety of data
distributions and applications, reordering is
responsive to dynamic preference changes, imposes
minimal overheads in overall completion time, and
provides dramatic improvements in the quality of the
feedback over time. Surprisingly, preliminary
experiments indicate that online reordering can also be
useful in traditional batch query processing, because
it can serve as a form of pipelined, approximate
sorting.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "Informix; interactive data processing; online
reordering; user control",
}
@Article{Tan:2000:PEN,
author = "Kian-Lee Tan and Cheng Hian Goh and Beng Chin Ooi",
title = "Progressive evaluation of nested aggregate queries",
journal = j-VLDB-J,
volume = "9",
number = "3",
pages = "261--278",
month = dec,
year = "2000",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:54 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t0009003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/0009003/00090261.htm;
http://link.springer.de/link/service/journals/00778/papers/0009003/00090261.pdf",
abstract = "In many decision-making scenarios, decision makers
require rapid feedback to their queries, which
typically involve aggregates. The traditional {\em
blocking execution model\/} can no longer meet the
demands of these users. One promising approach in the
literature, called {\em online aggregation}, evaluates
an aggregation query progressively as follows: as soon
as certain data have been evaluated, approximate
answers are produced with their respective running
confidence intervals; as more data are examined, the
answers and their corresponding running confidence
intervals are refined. In this paper, we extend this
approach to handle nested queries with aggregates
(i.e., at least one inner query block is an aggregate
query) by providing users with (approximate) answers
progressively as the inner aggregation query blocks are
evaluated. We address the new issues pose by nested
queries. In particular, the answer space begins with a
superset of the final answers and is refined as the
aggregates from the inner query blocks are refined. For
the intermediary answers to be meaningful, they have to
be interpreted with the aggregates from the inner
queries. We also propose a {\em multi-threaded model\/}
in evaluating such queries: each query block is
assigned to a thread, and the threads can be evaluated
concurrently and independently. The time slice across
the threads is {\em nondeterministic\/} in the sense
that the user controls the relative rate at which these
subqueries are being evaluated. For {\em enumerative\/}
nested queries, we propose a priority-based evaluation
strategy to present answers that are certainly in the
final answer space first, before presenting those whose
validity may be affected as the inner query aggregates
are refined. We implemented a prototype system using
Java and evaluated our system. Results for nested
queries with a level and multiple levels of nesting are
reported. Our results show the effectiveness of the
proposed mechanisms in providing progressive feedback
that reduces the initial waiting time of users
significantly without sacrificing the quality of the
answers.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "approximate answers; multi-threading; nested aggregate
queries; online aggregation; progressive query
processing",
}
@Article{Ngu:2001:CMV,
author = "Anne H. H. Ngu and Quan Z. Sheng and Du Q. Huynh and
Ron Lei",
title = "Combining multi-visual features for efficient indexing
in a large image database",
journal = j-VLDB-J,
volume = "9",
number = "4",
pages = "279--293",
month = apr,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100028",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:55 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1009004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1009004/10090279.htm;
http://link.springer.de/link/service/journals/00778/papers/1009004/10090279.pdf",
abstract = "The optimized distance-based access methods currently
available for multidimensional indexing in multimedia
databases have been developed based on two major
assumptions: a suitable distance function is known a
priori and the dimensionality of the image features is
low. It is not trivial to define a distance function
that best mimics human visual perception regarding
image similarity measurements. Reducing
high-dimensional features in images using the popular
principle component analysis (PCA) might not always be
possible due to the non-linear correlations that may be
present in the feature vectors. We propose in this
paper a fast and robust hybrid method for non-linear
dimensions reduction of composite image features for
indexing in large image database. This method
incorporates both the PCA and non-linear neural network
techniques to reduce the dimensions of feature vectors
so that an optimized access method can be applied. To
incorporate human visual perception into our system, we
also conducted experiments that involved a number of
subjects classifying images into different classes for
neural network training. We demonstrate that not only
can our neural network system reduce the dimensions of
the feature vectors, but that the reduced dimensional
feature vectors can also be mapped to an optimized
access method for fast and accurate indexing.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "high-dimensional indexing; image retrieval; neural
network",
}
@Article{Combi:2001:HTD,
author = "Carlo Combi and Giuseppe Pozzi",
title = "{{\em HMAP\/}} --- a temporal data model managing
intervals with different granularities and
indeterminacy from natural language sentences",
journal = j-VLDB-J,
volume = "9",
number = "4",
pages = "294--311",
month = apr,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100033",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:55 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1009004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1009004/10090294.htm;
http://link.springer.de/link/service/journals/00778/papers/1009004/10090294.pdf",
abstract = "The {\em granularity\/} of given temporal information
is the level of abstraction at which information is
expressed. Different units of measure allow one to
represent different granularities. Indeterminacy is
often present in temporal information given at
different granularities: temporal {\em indeterminacy\/}
is related to incomplete knowledge of when the
considered fact happened. Focusing on temporal
databases, different granularities and indeterminacy
have to be considered in expressing valid time, i.e.,
the time at which the information is true in the
modeled reality. In this paper, we propose {\em HMAP\/}
(The term is the transliteration of an ancient Greek
poetical word meaning ``day''.), a temporal data model
extending the capability of defining valid times with
different granularity and/or with indeterminacy. In
{\em HMAP}, absolute intervals are explicitly
represented by their {\em start}, {\em end}, and {\em
duration\/}: in this way, we can represent valid times
as ``in December 1998 for five hours'', ``from July
1995, for 15 days'', ``from March 1997 to October 15,
1997, between 6 and 6:30 p.m.''. {\em HMAP\/} is based
on a three-valued logic, for managing uncertainty in
temporal relationships. Formulas involving different
temporal relationships between intervals, instants, and
durations can be defined, allowing one to query the
database with different granularities, not necessarily
related to that of data. In this paper, we also discuss
the complexity of algorithms, allowing us to evaluate
{\em HMAP\/} formulas, and show that the formulas can
be expressed as constraint networks falling into the
class of simple temporal problems, which can be solved
in polynomial time.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "temporal databases; three-valued logic; time
granularity; time indeterminacy",
}
@Article{Li:2001:SEM,
author = "Wen-Syan Li and K. Sel{\c{c}}uk Candan and Kyoji
Hirata and Yoshinori Hara",
title = "Supporting efficient multimedia database exploration",
journal = j-VLDB-J,
volume = "9",
number = "4",
pages = "312--326",
month = apr,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100040",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:55 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1009004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1009004/10090312.htm;
http://link.springer.de/link/service/journals/00778/papers/1009004/10090312.pdf",
abstract = "Due to the fuzziness of query specification and media
matching, multimedia retrieval is conducted by way of
exploration. It is essential to provide feedback so
that users can visualize query reformulation
alternatives and database content distribution. Since
media matching is an expensive task, another issue is
how to efficiently support exploration so that the
system is not overloaded by perpetual query
reformulation. In this paper, we present a uniform
framework to represent statistical information of both
semantics and visual metadata for images in the
databases. We propose the concept of {\em query
verification}, which evaluates queries using
statistics, and provides users with feedback, including
the strictness and reformulation alternatives of each
query condition as well as estimated numbers of
matches. With query verification, the system increases
the efficiency of the multimedia database exploration
for both users and the system. Such statistical
information is also utilized to support progressive
query processing and query relaxation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "exploration; human computer interaction; multimedia
database; progressive processing; query relaxation;
selectivity statistics",
}
@Article{Lee:2001:GTM,
author = "Chiang Lee and Chi-Sheng Shih and Yaw-Huei Chen",
title = "A graph-theoretic model for optimizing queries
involving methods",
journal = j-VLDB-J,
volume = "9",
number = "4",
pages = "327--343",
month = apr,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100035",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:55 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1009004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1009004/10090327.htm;
http://link.springer.de/link/service/journals/00778/papers/1009004/10090327.pdf",
abstract = "Traditional algorithms for optimizing the execution
order of joins are no more valid when selections and
projections involve methods and become very expensive
operations. Selections and projections could be even
more costly than joins such that they are pulled above
joins, rather than pushed down in a query tree. In this
paper, we take a fundamental look at how to approach
query optimization from a top-down design perspective,
rather than trying to force one model to fit into
another. We present a graph model which is designed to
characterize execution plans. Each edge and each vertex
of the graph is assigned a weight to model execution
plans. We also design algorithms that use these weights
to optimize the execution order of operations. A cost
model of these algorithms is developed. Experiments are
conducted on the basis of this cost model. The results
show that our algorithms are superior to similar work
proposed in the literature.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "graph model; method query; object-oriented databases;
query optimization; spanning tree",
}
@Article{Wang:2001:IVH,
author = "Changzhou Wang and X. Sean Wang",
title = "Indexing very high-dimensional sparse and quasi-sparse
vectors for similarity searches",
journal = j-VLDB-J,
volume = "9",
number = "4",
pages = "344--361",
month = apr,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100036",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:55 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1009004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1009004/10090344.htm;
http://link.springer.de/link/service/journals/00778/papers/1009004/10090344.pdf",
abstract = "Similarity queries on complex objects are usually
translated into searches among their feature vectors.
This paper studies indexing techniques for very
high-dimensional (e.g., in hundreds) vectors that are
sparse or quasi-sparse, i.e., vectors {\em each\/}
having only a small number (e.g., ten) of non-zero or
significant values. Based on the R-tree, the paper
introduces the xS-tree that uses lossy compression of
bounding regions to guarantee a reasonable minimum
fan-out within the allocated storage space for each
node. In addition, the paper studies the performance
and scalability of the xS-tree via experiments.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "high-dimensional indexing structure; lossy
compression; quasi-sparse vector; similarity search;
sparse vector",
}
@Article{Casati:2001:GE,
author = "Fabio Casati and Ming-Chien Shan and Dimitrios
Georgakopoulos",
title = "Guest editorial",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "1--1",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100041",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100001.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100001.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Mecella:2001:DWC,
author = "Massimo Mecella and Barbara Pernici",
title = "Designing wrapper components for e-services in
integrating heterogeneous systems",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "2--15",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100044",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100002.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100002.pdf",
abstract = "Component-based approaches are becoming more and more
popular to support Internet-based application
development. Different component modeling approaches,
however, can be adopted, obtaining different
abstraction levels (either conceptual or operational).
In this paper we present a component-based architecture
for the design of e-applications, and discuss the
concept of wrapper components as building blocks for
the development of e-services, where these services are
based on legacy systems. We discuss their
characteristics and their applicability in
Internet-based application development.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "component; cooperation; e-application; e-service;
integration; legacy system; wrapper",
}
@Article{Eyal:2001:ICH,
author = "Anat Eyal and Tova Milo",
title = "Integrating and customizing heterogeneous e-commerce
applications",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "16--38",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100045",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100016.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100016.pdf",
abstract = "A broad spectrum of electronic commerce applications
is currently available on the Web, providing services
in almost any area one can think of. As the number and
variety of such applications grow, more business
opportunities emerge for providing new services based
on the integration and customization of existing
applications. (Web shopping malls and support for
comparative shopping are just a couple of examples.)
Unfortunately, the diversity of applications in each
specific domain and the disparity of interfaces,
application flows, actor roles in the business
transaction, and data formats, renders the integration
and manipulation of applications a rather difficult
task. In this paper we present the {\em Application
Manifold\/} system, aimed at simplifying the intricate
task of integration and customization of e-commerce
applications. The scope of the work in this paper is
limited to web-enabled e-commerce applications. We do
not support the integration/customization of
proprietary/legacy applications. The wrapping of such
applications as web services is complementary to our
work. Based on the emerging Web data standard, XML, and
application modeling standard, UML, the system offers a
novel declarative specification language for describing
the integration/customization task, supporting a
modular approach where new applications can be added
and integrated at will with minimal effort. Then,
acting as an application generator, the system
generates a full integrated/customized e-commerce
application, with the declarativity of the
specification allowing for the optimization and
verification of the generated application. The
integration here deals with the full profile of the
given e-commerce applications: the various services
offered by the applications, the activities and roles
of the different actors participating in the
application (e.g., customers, vendors), the application
flow, as well as with the data involved in the process.
This is in contrast to previous works on Web data
integration that focused primarily on querying the data
available in the applications, mostly ignoring the
additional aspects mentioned above.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "application integration; data integration; electronic
commerce",
}
@Article{Bonifati:2001:ARX,
author = "Angela Bonifati and Stefano Ceri and Stefano
Paraboschi",
title = "Active rules for {XML}: a new paradigm for
{E}-services",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "39--47",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100039",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100039.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100039.pdf",
abstract = "XML is rapidly becoming one of the most widely adopted
technologies for information exchange and
representation. As the use of XML becomes more
widespread, we foresee the development of active XML
rules, i.e., rules explicitly designed for the
management of XML information. In particular, we argue
that active rules for XML offer a natural paradigm for
the rapid development of innovative e-services. In the
paper, we show how active rules can be specified in the
context of XSLT, a pattern-based language for
publishing XML documents (promoted by the W3C) which is
receiving strong commercial support, and Lorel, a query
language for XML documents that is quite popular in the
research world. We demonstrate, through simple examples
of active rules for XSLT and Lorel, that active rules
can be effective for the implementation of e-commerce
services. We also discuss the various issues that need
to be considered in adapting the notion of relational
triggers to the XML context.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "active databases; document management; query languages
for XML; XML; XSLT",
}
@Article{Braumandl:2001:OUQ,
author = "R. Braumandl and M. Keidl and A. Kemper and D.
Kossmann and A. Kreutz and S. Seltzsam and K. Stocker",
title = "{ObjectGlobe}: {Ubiquitous} query processing on the
{Internet}",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "48--71",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100043",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100048.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100048.pdf",
abstract = "We present the design of ObjectGlobe, a distributed
and open query processor for Internet data sources.
Today, data is published on the Internet via Web
servers which have, if at all, very localized query
processing capabilities. The goal of the ObjectGlobe
project is to establish an open marketplace in which
{\em data\/} and {\em query processing capabilities\/}
can be distributed and used by any kind of Internet
application. Furthermore, ObjectGlobe integrates {\em
cycle providers\/} (i.e., machines) which carry out
query processing operators. The overall picture is to
make it possible to execute a query with --- in
principle --- unrelated query operators, cycle
providers, and data sources. Such an infrastructure can
serve as enabling technology for scalable e-commerce
applications, e.g., B2B and B2C market places, to be
able to integrate data and data processing operations
of a large number of participants. One of the main
challenges in the design of such an open system is to
ensure privacy and security. We discuss the ObjectGlobe
security requirements, show how basic components such
as the optimizer and runtime system need to be
extended, and present the results of performance
experiments that assess the additional cost for secure
distributed query processing. Another challenge is
quality of service management so that users can
constrain the costs and running times of their
queries.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "cycle-; distributed query processing; function- and
data provider; open systems; privacy; quality of
service; query optimization; security",
}
@Article{Su:2001:IBN,
author = "Stanley Y. W. Su and Chunbo Huang and Joachim Hammer
and Yihua Huang and Haifei Li and Liu Wang and Youzhong
Liu and Charnyote Pluempitiwiriyawej and Minsoo Lee and
Herman Lam",
title = "An {Internet}-based negotiation server for
e-commerce",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "72--90",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100051",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100072.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100072.pdf",
abstract = "This paper describes the design and implementation of
a replicable, Internet-based negotiation server for
conducting bargaining-type negotiations between
enterprises involved in e-commerce and e-business.
Enterprises can be buyers and sellers of
products/services or participants of a complex supply
chain engaged in purchasing, planning, and scheduling.
Multiple copies of our server can be installed to
complement the services of Web servers. Each enterprise
can install or select a trusted negotiation server to
represent his/her interests. Web-based GUI tools are
used during the build-time registration process to
specify the requirements, constraints, and rules that
represent negotiation policies and strategies,
preference scoring of different data conditions, and
aggregation methods for deriving a global cost-benefit
score for the item(s) under negotiation. The
registration information is used by the negotiation
servers to automatically conduct bargaining type
negotiations on behalf of their clients. In this paper,
we present the architecture of our implementation as
well as a framework for automated negotiations, and
describe a number of communication primitives which are
used in the underlying negotiation protocol. A
constraint satisfaction processor (CSP) is used to
evaluate a negotiation proposal or counterproposal
against the registered requirements and constraints of
a client company. In case of a constraint violation, an
event is posted to trigger the execution of negotiation
strategic rules, which either automatically relax the
violated constraint, ask for human intervention, invoke
an application, or perform other remedial operations.
An Event-Trigger-Rule (ETR) server is used to manage
events, triggers, and rules. Negotiation strategic
rules can be added or modified at run-time. A
cost-benefit analysis component is used to perform
quantitative analysis of alternatives. The use of
negotiation servers to conduct automated negotiation
has been demonstrated in the context of an integrated
supply chain scenario.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "constraint evaluation; cost-benefit analysis;
database; e-commerce; negotiation policy and strategy;
negotiation protocol",
}
@Article{Shegalov:2001:XEW,
author = "German Shegalov and Michael Gillmann and Gerhard
Weikum",
title = "{XML}-enabled workflow management for e-services
across heterogeneous platforms",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "91--103",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100038",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100091.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100091.pdf",
abstract = "Advanced e-services require efficient, flexible, and
easy-to-use workflow technology that integrates well
with mainstream Internet technologies such as XML and
Web servers. This paper discusses an XML-enabled
architecture for distributed workflow management that
is implemented in the latest version of our Mentor-lite
prototype system. The key asset of this architecture is
an XML mediator that handles the exchange of business
and flow control data between workflow and
business-object servers on the one hand and client
activities on the other via XML messages over http. Our
implementation of the mediator has made use of Oracle's
XSQL servlet. The major benefit of the advocated
architecture is that it provides seamless integration
of client applications into e-service workflows with
scalable efficiency and very little explicit coding, in
contrast to an earlier, Java-based, version of our
Mentor-lite prototype that required much more code and
exhibited potential performance problems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "business processes; information system
interoperability; Internet e-services; workflow
management; XML/XSL",
}
@Article{Datta:2001:ASS,
author = "Anindya Datta and Kaushik Dutta and Debra VanderMeer
and Krithi Ramamritham and Shamkant B. Navathe",
title = "An architecture to support scalable online
personalization on the {Web}",
journal = j-VLDB-J,
volume = "10",
number = "1",
pages = "104--117",
month = aug,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100037",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:56 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010001/10100104.htm;
http://link.springer.de/link/service/journals/00778/papers/1010001/10100104.pdf",
abstract = "Online personalization is of great interest to
e-companies. Virtually all personalization technologies
are based on the idea of storing as much historical
customer session data as possible, and then querying
the data store as customers navigate through a web
site. The holy grail of online personalization is an
environment where fine-grained, detailed historical
session data can be queried based on current online
navigation patterns for use in formulating real-time
responses. Unfortunately, as more consumers become
e-shoppers, the user load and the amount of historical
data continue to increase, causing scalability-related
problems for almost all current personalization
technologies. This paper chronicles the development of
a real-time interaction management system through the
integration of historical data and online visitation
patterns of e-commerce site visitors. It describes the
scientific underpinnings of the system as well as its
architecture. Experimental evaluation of the system
shows that the caching and storage techniques built
into the system deliver performance that is orders of
magnitude better than those derived from off-the-shelf
database components.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "behavior-based personalization; dynamic lookahead
profile; profile caching; scalable online
personalization; Web site and interaction model",
}
@Article{ElAbbadi:2001:GE,
author = "Amr {El Abbadi} and Gunter Schlageter and Kyu-Young
Whang",
title = "Guest editorial",
journal = j-VLDB-J,
volume = "10",
number = "2--3",
pages = "119--119",
month = sep,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100053",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:58 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010002/10100119.htm;
http://link.springer.de/link/service/journals/00778/papers/1010002/10100119.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Pucheral:2001:PSD,
author = "Philippe Pucheral and Luc Bouganim and Patrick
Valduriez and Christophe Bobineau",
title = "{PicoDBMS}: {Scaling} down database techniques for the
smartcard",
journal = j-VLDB-J,
volume = "10",
number = "2--3",
pages = "120--132",
month = sep,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100047",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:58 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010002/10100120.htm;
http://link.springer.de/link/service/journals/00778/papers/1010002/10100120.pdf",
abstract = "Smartcards are the most secure portable computing
device today. They have been used successfully in
applications involving money, and proprietary and
personal data (such as banking, healthcare, insurance,
etc.). As smartcards get more powerful (with 32-bit CPU
and more than 1 MB of stable memory in the next
versions) and become multi-application, the need for
database management arises. However, smartcards have
severe hardware limitations (very slow write, very
little RAM, constrained stable memory, no autonomy,
etc.) which make traditional database technology
irrelevant. The major problem is scaling down database
techniques so they perform well under these
limitations. In this paper, we give an in-depth
analysis of this problem and propose a PicoDBMS
solution based on highly compact data structures, query
execution without RAM, and specific techniques for
atomicity and durability. We show the effectiveness of
our techniques through performance evaluation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "atomicity; durability; execution model; PicoDBMS;
query optimization; smartcard applications; storage
model",
}
@Article{Shanmugasundaram:2001:EPR,
author = "Jayavel Shanmugasundaram and Eugene Shekita and Rimon
Barr and Michael Carey and Bruce Lindsay and Hamid
Pirahesh and Berthold Reinwald",
title = "Efficiently publishing relational data as {XML}
documents",
journal = j-VLDB-J,
volume = "10",
number = "2--3",
pages = "133--154",
month = sep,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100052",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:58 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010002/10100133.htm;
http://link.springer.de/link/service/journals/00778/papers/1010002/10100133.pdf",
abstract = "XML is rapidly emerging as a standard for exchanging
business data on the World Wide Web. For the
foreseeable future, however, most business data will
continue to be stored in relational database systems.
Consequently, if XML is to fulfill its potential, some
mechanism is needed to publish relational data as XML
documents. Towards that goal, one of the major
challenges is finding a way to efficiently structure
and tag data from one or more tables as a hierarchical
XML document. Different alternatives are possible
depending on when this processing takes place and how
much of it is done inside the relational engine. In
this paper, we characterize and study the performance
of these alternatives. Among other things, we explore
the use of new scalar and aggregate functions in SQL
for constructing complex XML documents directly in the
relational engine. We also explore different execution
plans for generating the content of an XML document.
The results of an experimental study show that
constructing XML documents inside the relational engine
can have a significant performance benefit. Our results
also show the superiority of having the relational
engine use what we call an ``outer union plan'' to
generate the content of an XML document.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "publishing; relational databases; XML",
}
@Article{Chang:2001:AQM,
author = "Kevin Chen-Chuan Chang and H{\'e}ctor
Garc{\'\i}a-Molina",
title = "Approximate query mapping: {Accounting} for
translation closeness",
journal = j-VLDB-J,
volume = "10",
number = "2--3",
pages = "155--181",
month = sep,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100042",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:58 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010002/10100155.htm;
http://link.springer.de/link/service/journals/00778/papers/1010002/10100155.pdf",
abstract = "In this paper we present a mechanism for approximately
translating Boolean query constraints across
heterogeneous information sources. Achieving the best
translation is challenging because sources support
different constraints for formulating queries, and
often these constraints cannot be precisely translated.
For instance, a query [score>8] might be ``perfectly''
translated as [rating>0.8] at some site, but can only
be approximated as [grade=A] at another. Unlike other
work, our general framework adopts a customizable
``closeness'' metric for the translation that combines
both precision and recall. Our results show that for
query translation we need to handle interdependencies
among both query conjuncts as well as disjuncts. As the
basis, we identify the essential requirements of a rule
system for users to encode the mappings for atomic
semantic units. Our algorithm then translates complex
queries by rewriting them in terms of the semantic
units. We show that, under practical assumptions, our
algorithm generates the best approximate translations
with respect to the closeness metric of choice. We also
present a case study to show how our technique may be
applied in practice.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "approximate query translation; closeness;
constraint-mapping; information integration;
mediators",
}
@Article{Pottinger:2001:MSA,
author = "Rachel Pottinger and Alon Halevy",
title = "{MiniCon}: a scalable algorithm for answering
queries using views",
journal = j-VLDB-J,
volume = "10",
number = "2--3",
pages = "182--198",
month = sep,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100048",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:58 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010002/10100182.htm;
http://link.springer.de/link/service/journals/00778/papers/1010002/10100182.pdf",
abstract = "The problem of answering queries using views is to
find efficient methods of answering a query using a set
of previously materialized views over the database,
rather than accessing the database relations. The
problem has received significant attention because of
its relevance to a wide variety of data management
problems, such as data integration, query optimization,
and the maintenance of physical data independence. To
date, the performance of proposed algorithms has
received very little attention, and in particular,
their scale up in the presence of a large number of
views is unknown. We first analyze two previous
algorithms, the bucket algorithm and the inverse-rules,
and show their deficiencies. We then describe the
MiniCon, a novel algorithm for finding the
maximally-contained rewriting of a conjunctive query
using a set of conjunctive views. We present the first
experimental study of algorithms for answering queries
using views. The study shows that the MiniCon scales up
well and significantly outperforms the previous
algorithms. We describe an extension of the MiniCon to
handle comparison predicates, and show its performance
experimentally. Finally, we describe how the MiniCon
can be extended to the context of query optimization.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data integration; materialized views; query
optimization; Web and databases",
}
@Article{Chakrabarti:2001:AQP,
author = "Kaushik Chakrabarti and Minos Garofalakis and Rajeev
Rastogi and Kyuseok Shim",
title = "Approximate query processing using wavelets",
journal = j-VLDB-J,
volume = "10",
number = "2--3",
pages = "199--223",
month = sep,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100049",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:58 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010002/10100199.htm;
http://link.springer.de/link/service/journals/00778/papers/1010002/10100199.pdf",
abstract = "Approximate query processing has emerged as a
cost-effective approach for dealing with the huge data
volumes and stringent response-time requirements of
today's decision support systems (DSS). Most work in
this area, however, has so far been limited in its
query processing scope, typically focusing on specific
forms of aggregate queries. Furthermore, conventional
approaches based on sampling or histograms appear to be
inherently limited when it comes to approximating the
results of complex queries over high-dimensional DSS
data sets. In this paper, we propose the use of
multi-dimensional wavelets as an effective tool for
general-purpose approximate query processing in modern,
high-dimensional applications. Our approach is based on
building {\em wavelet-coefficient synopses\/} of the
data and using these synopses to provide approximate
answers to queries. We develop novel query processing
algorithms that operate directly on the
wavelet-coefficient synopses of relational tables,
allowing us to process arbitrarily complex queries {\em
entirely\/} in the wavelet-coefficient domain. This
guarantees extremely fast response times since our
approximate query execution engine can do the bulk of
its processing over compact sets of wavelet
coefficients, essentially postponing the expansion into
relational tuples until the end-result of the query. We
also propose a novel wavelet decomposition algorithm
that can build these synopses in an I/O-efficient
manner. Finally, we conduct an extensive experimental
study with synthetic as well as real-life data sets to
determine the effectiveness of our wavelet-based
approach compared to sampling and histograms. Our
results demonstrate that our techniques: (1) provide
approximate answers of better quality than either
sampling or histograms; (2) offer query execution-time
speedups of more than two orders of magnitude; and (3)
guarantee extremely fast synopsis construction times
that scale linearly with the size of the data.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "approximate query answers; data synopses; query
processing; wavelet decomposition",
}
@Article{Sarawagi:2001:UCM,
author = "Sunita Sarawagi",
title = "User-cognizant multidimensional analysis",
journal = j-VLDB-J,
volume = "10",
number = "2--3",
pages = "224--239",
month = sep,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100046",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:58 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010002/10100224.htm;
http://link.springer.de/link/service/journals/00778/papers/1010002/10100224.pdf",
abstract = "Our goal is to enhance multidimensional database
systems with a suite of advanced operators to automate
data analysis tasks that are currently handled through
manual exploration. In this paper, we present a key
component of our system that characterizes the
information content of a cell based on a user's prior
familiarity with the cube and provides a
context-sensitive exploration of the cube. There are
three main modules of this component. A Tracker, that
continuously tracks the parts of the cube that a user
has visited. A Modeler, that pieces together the
information in the visited parts to model the user's
expected values in the unvisited parts. An Informer,
that processes user's queries about the most
informative unvisited parts of the cube. The
mathematical basis for the expected value modeling is
provided by the classical maximum entropy principle.
Accordingly, the expected values are computed so as to
agree with every value that is already visited while
reducing assumptions about unvisited values to the
minimum by maximizing their entropy. The most
informative values are defined as those that bring the
new expected values closest to the actual values. We
believe and prove through experiments that such a
user-in-the-loop exploration will enable much faster
assimilation of all significant information in the data
compared to existing manual explorations.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "maximum entropy; multidimensional data exploration;
OLAP; personalized mining; user-sensitive interest
measure",
}
@Article{Turker:2001:SIS,
author = "Can T{\"u}rker and Michael Gertz",
title = "Semantic integrity support in {SQL:1999} and
commercial (object-)relational database management
systems",
journal = j-VLDB-J,
volume = "10",
number = "4",
pages = "241--269",
month = dec,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100050",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:59 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010004/10100241.htm;
http://link.springer.de/link/service/journals/00778/papers/1010004/10100241.pdf",
abstract = "The correctness of the data managed by database
systems is vital to any application that utilizes data
for business, research, and decision-making purposes.
To guard databases against erroneous data not
reflecting real-world data or business rules, semantic
integrity constraints can be specified during database
design. Current commercial database management systems
provide various means to implement mechanisms to
enforce semantic integrity constraints at database
run-time. In this paper, we give an overview of the
semantic integrity support in the most recent
SQL-standard SQL:1999, and we show to what extent the
different concepts and language constructs proposed in
this standard can be found in major commercial
(object-)relational database management systems. In
addition, we discuss general design guidelines that
point out how the semantic integrity features provided
by these systems should be utilized in order to
implement an effective integrity enforcing subsystem
for a database.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "constraint enforcement; object-relational databases;
semantic integrity constraints; SQL:1999",
}
@Article{Halevy:2001:AQU,
author = "Alon Y. Halevy",
title = "Answering queries using views: a survey",
journal = j-VLDB-J,
volume = "10",
number = "4",
pages = "270--294",
month = dec,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100054",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:59 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010004/10100270.htm;
http://link.springer.de/link/service/journals/00778/papers/1010004/10100270.pdf",
abstract = "The problem of answering queries using views is to
find efficient methods of answering a query using a set
of previously defined materialized views over the
database, rather than accessing the database relations.
The problem has recently received significant attention
because of its relevance to a wide variety of data
management problems. In query optimization, finding a
rewriting of a query using a set of materialized views
can yield a more efficient query execution plan. To
support the separation of the logical and physical
views of data, a storage schema can be described using
views over the logical schema. As a result, finding a
query execution plan that accesses the storage amounts
to solving the problem of answering queries using
views. Finally, the problem arises in data integration
systems, where data sources can be described as
precomputed views over a mediated schema. This article
surveys the state of the art on the problem of
answering queries using views, and synthesizes the
disparate works into a coherent framework. We describe
the different applications of the problem, the
algorithms proposed to solve it and the relevant
theoretical results.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data integration; date warehousing; materialized
views; query optimization; survey; Web-site
management",
}
@Article{Laurent:2001:MCI,
author = "D. Laurent and J. Lechtenb{\"o}rger and N. Spyratos
and G. Vossen",
title = "Monotonic complements for independent data
warehouses",
journal = j-VLDB-J,
volume = "10",
number = "4",
pages = "295--315",
month = dec,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100055",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:59 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010004/10100295.htm;
http://link.springer.de/link/service/journals/00778/papers/1010004/10100295.pdf",
abstract = "Views over databases have regained attention in the
context of data warehouses, which are seen as {\em
materialized\/} views. In this setting, efficient view
maintenance is an important issue, for which the notion
of {\em self-maintainability\/} has been identified as
desirable. In this paper, we extend the concept of
self-maintainability to (query and update) {\em
independence\/} within a formal framework, where
independence with respect to arbitrary given sets of
queries and updates over the sources can be guaranteed.
To this end we establish an intuitively appealing
connection between warehouse independence and {\em view
complements}. Moreover, we study special kinds of
complements, namely {\em monotonic complements}, and
show how to compute minimal ones in the presence of
keys and foreign keys in the underlying databases.
Taking advantage of these complements, an algorithmic
approach is proposed for the specification of
independent warehouses with respect to given sets of
queries and updates.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data warehouse; independence; materialized view;
self-maintainability; view complement",
}
@Article{Grefen:2001:GTS,
author = "Paul Grefen and Jochem Vonk and Peter Apers",
title = "Global transaction support for workflow management
systems: from formal specification to practical
implementation",
journal = j-VLDB-J,
volume = "10",
number = "4",
pages = "316--333",
month = dec,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100056",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:59 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010004/10100316.htm;
http://link.springer.de/link/service/journals/00778/papers/1010004/10100316.pdf",
abstract = "In this paper, we present an approach to global
transaction management in workflow environments. The
transaction mechanism is based on the well-known notion
of compensation, but extended to deal with both
arbitrary process structures to allow cycles in
processes and safepoints to allow partial compensation
of processes. We present a formal specification of the
transaction model and transaction management algorithms
in set and graph theory, providing clear, unambiguous
transaction semantics. The specification is
straightforwardly mapped to a modular architecture, the
implementation of which is first applied in a testing
environment, then in the prototype of a commercial
workflow management system. The modular nature of the
resulting system allows easy distribution using
middleware technology. The path from abstract semantics
specification to concrete, real-world implementation of
a workflow transaction mechanism is thus covered in a
complete and coherent fashion. As such, this paper
provides a complete framework for the application of
well-founded transactional workflows.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "compensation; long-running transaction; transaction
management; workflow management",
}
@Article{Rahm:2001:SAA,
author = "Erhard Rahm and Philip A. Bernstein",
title = "A survey of approaches to automatic schema matching",
journal = j-VLDB-J,
volume = "10",
number = "4",
pages = "334--350",
month = dec,
year = "2001",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100057",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:50:59 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t1010004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/1010004/10100334.htm;
http://link.springer.de/link/service/journals/00778/papers/1010004/10100334.pdf",
abstract = "Schema matching is a basic problem in many database
application domains, such as data integration,
E-business, data warehousing, and semantic query
processing. In current implementations, schema matching
is typically performed manually, which has significant
limitations. On the other hand, previous research
papers have proposed many techniques to achieve a
partial automation of the match operation for specific
application domains. We present a taxonomy that covers
many of these existing approaches, and we describe the
approaches in some detail. In particular, we
distinguish between schema-level and instance-level,
element-level and structure-level, and language-based
and constraint-based matchers. Based on our
classification we review some previous match
implementations thereby indicating which part of the
solution space they cover. We intend our taxonomy and
review of past work to be useful when comparing
different approaches to schema matching, when
developing a new match algorithm, and when implementing
a schema matching component.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "graph matching; machine learning; model management;
schema integration; schema matching",
}
@Article{Saltenis:2002:INR,
author = "Simonas {\v{S}}altenis and Christian S. Jensen",
title = "Indexing of now-relative spatio-bitemporal data",
journal = j-VLDB-J,
volume = "11",
number = "1",
pages = "1--16",
month = aug,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100058",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:00 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011001/20110001.htm;
http://link.springer.de/link/service/journals/00778/papers/2011001/20110001.pdf",
abstract = "Real-world entities are inherently spatially and
temporally referenced, and database applications
increasingly exploit databases that record the past,
present, and anticipated future locations of entities,
e.g., the residences of customers obtained by the
geo-coding of addresses. Indices that efficiently
support queries on the spatio-temporal extents of such
entities are needed. However, past indexing research
has progressed in largely separate spatial and temporal
streams. Adding time dimensions to spatial indices, as
if time were a spatial dimension, neither supports nor
exploits the special properties of time. On the other
hand, temporal indices are generally not amenable to
extension with spatial dimensions. This paper proposes
the first efficient and versatile index for a general
class of spatio-temporal data: the discretely changing
spatial aspect of an object may be a point or may have
an extent; both transaction time and valid time are
supported, and a generalized notion of the current
time, {\em now}, is accommodated for both temporal
dimensions. The index is based on the R$^*$-tree and
provides means of prioritizing space versus time, which
enables it to adapt to spatially and temporally
restrictive queries. Performance experiments are
reported that evaluate pertinent aspects of the
index.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access method; bitemporal data; multidimensional
indexing; R-tree; spatio-temporal data; transaction
time; valid time",
}
@Article{Rafiei:2002:ERS,
author = "Davood Rafiei and Alberto O. Mendelzon",
title = "Efficient retrieval of similar shapes",
journal = j-VLDB-J,
volume = "11",
number = "1",
pages = "17--27",
month = aug,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780100059",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:00 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011001/20110017.htm;
http://link.springer.de/link/service/journals/00778/papers/2011001/20110017.pdf",
abstract = "We propose an indexing technique for the fast
retrieval of objects in 2D images based on similarity
between their boundary shapes. Our technique is robust
in the presence of noise and supports several important
notions of similarity including optimal matches
irrespective of variations in orientation and/or
position. Our method can also handle size-invariant
matches using a normalization technique, although
optimality is not guaranteed here. We implemented our
method and performed experiments on real (hand-written
digits) data. Our experimental results showed the
superiority of our method compared to search based on
sequential scanning, which is the only obvious
competitor. The performance gain of our method
increases with any increase in the number or the size
of shapes.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "Fourier descriptors; image databases; shape retrieval;
similarity queries; similarity retrieval",
}
@Article{Navarro:2002:SMS,
author = "Gonzalo Navarro",
title = "Searching in metric spaces by spatial approximation",
journal = j-VLDB-J,
volume = "11",
number = "1",
pages = "28--46",
month = aug,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780200060",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:00 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011001/20110028.htm;
http://link.springer.de/link/service/journals/00778/papers/2011001/20110028.pdf",
abstract = "We propose a new data structure to search in metric
spaces. A {\em metric space\/} is formed by a
collection of objects and a {\em distance function\/}
defined among them which satisfies the triangle
inequality. The goal is, given a set of objects and a
query, retrieve those objects close enough to the
query. The complexity measure is the number of
distances computed to achieve this goal. Our data
structure, called {\em sa-tree\/} (``spatial
approximation tree''), is based on approaching the
searched objects spatially, that is, getting closer and
closer to them, rather than the classic
divide-and-conquer approach of other data structures.
We analyze our method and show that the number of
distance evaluations to search among $n$ objects is
sublinear. We show experimentally that the {\em
sa-tree\/} is the best existing technique when the
metric space is hard to search or the query has low
selectivity. These are the most important unsolved
cases in real applications. As a practical advantage,
our data structure is one of the few that does not need
to tune parameters, which makes it appealing for use by
non-experts.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "multimedia databases; similarity or proximity search;
spatial and multidimensional search; spatial
approximation tree",
}
@Article{Mihaila:2002:LAD,
author = "George A. Mihaila and Louiqa Raschid and Anthony
Tomasic",
title = "Locating and accessing data repositories with
{WebSemantics}",
journal = j-VLDB-J,
volume = "11",
number = "1",
pages = "47--57",
month = aug,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780200061",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:00 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011001/20110047.htm;
http://link.springer.de/link/service/journals/00778/papers/2011001/20110047.pdf",
abstract = "Many collections of scientific data in particular
disciplines are available today on the World Wide Web.
Most of these data sources are compliant with some
standard for interoperable access. In addition, sources
may support a common semantics, i.e., a shared meaning
for the data types and their domains. However, sharing
data among a global community of users is still
difficult because of the following reasons: (i) data
providers need a mechanism for describing and
publishing available sources of data; (ii) data
administrators need a mechanism for discovering the
location of published sources and obtaining metadata
from these sources; and (iii) users need a mechanism
for browsing and selecting sources. This paper
describes a system, WebSemantics, that accomplishes the
above tasks. We describe an architecture for the
publication and discovery of scientific data sources,
which is an extension of the World Wide Web
architecture and protocols. We support catalogs
containing metadata about data sources for some
application domain. We define a language for
discovering sources and querying their metadata. We
then describe the WebSemantics prototype.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data discovery; data integration; mediators; query
languages; World Wide Web; XML",
}
@Article{Ferrari:2002:ASD,
author = "E. Ferrari and N. R. Adam and V. Atluri and E. Bertino
and U. Capuozzo",
title = "An authorization system for digital libraries",
journal = j-VLDB-J,
volume = "11",
number = "1",
pages = "58--67",
month = aug,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780200063",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:00 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011001/20110058.htm;
http://link.springer.de/link/service/journals/00778/papers/2011001/20110058.pdf",
abstract = "Digital Libraries (DLs) introduce several challenging
requirements with respect to the formulation,
specification, and enforcement of adequate data
protection policies. Unlike conventional database
environments, a DL environment typically is
characterized by a dynamic subject population, often
making accesses from remote locations, and by an
extraordinarily large amount of multimedia information,
stored in a variety of formats. Moreover, in a DL
environment, access policies are often specified based
on subject qualifications and characteristics, rather
than subject identity. Traditional authorization models
are not adequate to meet access control requirements of
DLs. In this paper, we present a {\em Digital Library
Authorization System\/} (DLAS). DLAS employs a
content-based authorization model, called a {\em
Digital Library Authorization Model\/} (DLAM) which was
proposed in previous work [1].",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access control; credentials; digital libraries",
}
@Article{Marathe:2002:QPT,
author = "Arunprasad P. Marathe and Kenneth Salem",
title = "Query processing techniques for arrays",
journal = j-VLDB-J,
volume = "11",
number = "1",
pages = "68--91",
month = aug,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780200062",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:00 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011001/20110068.htm;
http://link.springer.de/link/service/journals/00778/papers/2011001/20110068.pdf",
abstract = "Arrays are a common and important class of data. At
present, database systems do not provide adequate array
support: arrays can neither be easily defined nor
conveniently manipulated. Further, array manipulations
are not optimized. This paper describes a language
called the {\em Array Manipulation Language\/} (AML),
for expressing array manipulations, and a collection of
optimization techniques for AML expressions. In the AML
framework for array manipulation, arbitrary
externally-defined functions can be applied to arrays
in a structured manner. AML can be adapted to different
application domains by choosing appropriate external
function definitions. This paper concentrates on arrays
occurring in databases of digital images such as
satellite or medical images. AML queries can be treated
declaratively and subjected to rewrite optimizations.
Rewriting minimizes the number of applications of
potentially costly external functions required to
compute a query result. AML queries can also be
optimized for space. Query results are generated a
piece at a time by pipelined execution plans, and the
amount of memory required by a plan depends on the
order in which pieces are generated. An optimizer can
consider generating the pieces of the query result in a
variety of orders, and can efficiently choose orders
that require less space. An AML-based prototype array
database system called {\em ArrayDB\/} has been built,
and it is used to show the effectiveness of these
optimization techniques.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "array manipulation language; array query optimization;
declarative query language; memory-usage optimization;
pipelined evaluation; user-defined functions",
}
@Article{Sakurai:2002:SIH,
author = "Yasushi Sakurai and Masatoshi Yoshikawa and Shunsuke
Uemura and Haruhiko Kojima",
title = "Spatial indexing of high-dimensional data based on
relative approximation",
journal = j-VLDB-J,
volume = "11",
number = "2",
pages = "93--108",
month = oct,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0066-9",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:01 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011002/20110093.htm;
http://link.springer.de/link/service/journals/00778/papers/2011002/20110093.pdf",
abstract = "We propose a novel index structure, the A-tree
(approximation tree), for similarity searches in
high-dimensional data. The basic idea of the A-tree is
the introduction of virtual bounding rectangles (VBRs)
which contain and approximate MBRs or data objects.
VBRs can be represented quite compactly and thus affect
the tree configuration both quantitatively and
qualitatively. First, since tree nodes can contain a
large number of VBR entries, fanout becomes large,
which increases search speed. More importantly, we have
a free hand in arranging MBRs and VBRs in the tree
nodes. Each A-tree node contains an MBR and its
children VBRs. Therefore, by fetching an A-tree node,
we can obtain information on the exact position of a
parent MBR and the approximate position of its
children. We have performed experiments using both
synthetic and real data sets. For the real data sets,
the A-tree outperforms the SR-tree and the VA-file in
all dimensionalities up to 64 dimensions, which is the
highest dimension in our experiments. Additionally, we
propose a cost model for the A-tree. We verify the
validity of the cost model for synthetic and real data
sets.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "high-dimensional data; relative approximation;
similarity search",
}
@Article{Hjaltason:2002:SCP,
author = "Gisli R. Hjaltason and Hanan Samet",
title = "Speeding up construction of {PMR} quadtree-based
spatial indexes",
journal = j-VLDB-J,
volume = "11",
number = "2",
pages = "109--137",
month = oct,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0067-8",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:01 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011002/20110109.htm;
http://link.springer.de/link/service/journals/00778/papers/2011002/20110109.pdf",
abstract = "Spatial indexes, such as those based on the quadtree,
are important in spatial databases for efficient
execution of queries involving spatial constraints,
especially when the queries involve spatial joins. In
this paper we present a number of techniques for
speeding up the construction of quadtree-based spatial
indexes, specifically the PMR quadtree, which can index
arbitrary spatial data. We assume a quadtree
implementation using the ``linear quadtree'', a
disk-resident representation that stores objects
contained in the leaf nodes of the quadtree in a linear
index (e.g., a B-tree) ordered based on a space-filling
curve. We present two complementary techniques: an
improved insertion algorithm and a bulk-loading method.
The bulk-loading method can be extended to handle
bulk-insertions into an existing PMR quadtree. We make
some analytical observations about the I/O cost and CPU
cost of our PMR quadtree bulk-loading algorithm, and
conduct an extensive empirical study of the techniques
presented in the paper. Our techniques are found to
yield significant speedup compared to traditional
quadtree building methods, even when the size of a main
memory buffer is very small compared to the size of the
resulting quadtrees.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "bulk-loading; I/O; spatial indexing",
}
@Article{Nanopoulos:2002:ESS,
author = "Alexandros Nanopoulos and Yannis Manolopoulos",
title = "Efficient similarity search for market basket data",
journal = j-VLDB-J,
volume = "11",
number = "2",
pages = "138--152",
month = oct,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0068-7",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:01 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011002/20110138.htm;
http://link.springer.de/link/service/journals/00778/papers/2011002/20110138.pdf",
abstract = "Several organizations have developed very large market
basket databases for the maintenance of customer
transactions. New applications, e.g., Web
recommendation systems, present the requirement for
processing similarity queries in market basket
databases. In this paper, we propose a novel scheme for
similarity search queries in basket data. We develop a
new representation method, which, in contrast to
existing approaches, is proven to provide correct
results. New algorithms are proposed for the processing
of similarity queries. Extensive experimental results,
for a variety of factors, illustrate the superiority of
the proposed scheme over the state-of-the-art method.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data mining; market basket data; nearest-neighbor;
similarity search",
}
@Article{Feng:2002:TMM,
author = "Ling Feng and Jeffrey Xu Yu and Hongjun Lu and Jiawei
Han",
title = "A template model for multidimensional
inter-transactional association rules",
journal = j-VLDB-J,
volume = "11",
number = "2",
pages = "153--175",
month = oct,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0069-6",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:01 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011002.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011002/20110153.htm;
http://link.springer.de/link/service/journals/00778/papers/2011002/20110153.pdf",
abstract = "Multidimensional inter-transactional association rules
extend the traditional association rules to describe
more general associations among items with multiple
properties across transactions. ``{\em After McDonald
and Burger King open branches, KFC will open a branch
two months later and one mile away}'' is an example of
such rules. Since the number of potential
inter-transactional association rules tends to be
extremely large, mining inter-transactional
associations poses more challenges on efficient
processing than mining traditional intra-transactional
associations. In order to make such association rule
mining truly practical and computationally tractable,
in this study we present a template model to help users
declare the interesting {\em multidimensional
inter-transactional associations\/} to be mined. With
the guidance of templates, several optimization
techniques, i.e., joining, converging, and speeding,
are devised to speed up the discovery of
inter-transactional association rules. We show, through
a series of experiments on both synthetic and real-life
data sets, that these optimization techniques can yield
significant performance benefits.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "intra-transactional/inter-transactional association
rules; multidimensional context; template model",
}
@Article{Apers:2002:E,
author = "Peter Apers and Stefano Ceri and Richard Snodgrass",
title = "Editorial",
journal = j-VLDB-J,
volume = "11",
number = "3",
pages = "177--178",
month = nov,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0075-8",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:02 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Special issue VLDB best papers 2001.",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011003/20110177.htm;
http://link.springer.de/link/service/journals/00778/papers/2011003/20110177.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{An:2002:EPT,
author = "Ning An and Sudhanva Gurumurthi and Anand
Sivasubramaniam and Narayanan Vijaykrishnan and Mahmut
Kandemir and Mary Jane Irwin",
title = "Energy-performance trade-offs for spatial access
methods on memory-resident data",
journal = j-VLDB-J,
volume = "11",
number = "3",
pages = "179--197",
month = nov,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0073-x",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:02 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Special issue VLDB best papers 2001.",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011003/20110179.htm;
http://link.springer.de/link/service/journals/00778/papers/2011003/20110179.pdf",
abstract = "The proliferation of mobile and pervasive computing
devices has brought energy constraints into the
limelight. Energy-conscious design is important at all
levels of system architecture, and the software has a
key role to play in conserving battery energy on these
devices. With the increasing popularity of spatial
database applications, and their anticipated deployment
on mobile devices (such as road atlases and GPS-based
applications), it is critical to examine the energy
implications of spatial data storage and access methods
for memory resident datasets. While there has been
extensive prior research on spatial access methods on
resource-rich environments, this is, perhaps, the first
study to examine their suitability for
resource-constrained environments. Using a detailed
cycle-accurate energy estimation framework and four
different datasets, this paper examines the pros and
cons of three previously proposed spatial indexing
alternatives from both the energy and performance
angles. Specifically, the Quadtree, Packed R-tree, and
Buddy-Tree structures are evaluated and compared with a
brute-force approach that does not use an index. The
results show that there are both performance and energy
trade-offs between the indexing schemes for the
different queries. The nature of the query also plays
an important role in determining the energy-performance
trade-offs. Further, technological trends and
architectural enhancements are influencing factors on
the relative behavior of the index structures. The work
in the query has a bearing on how and where (on a
mobile client or/and on a server) it should be
performed for performance and energy savings. The
results from this study will be beneficial for the
design and implementation of embedded spatial
databases, accelerating their deployment on numerous
mobile devices.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "energy optimization; multidimensional indexing;
resource-constrained computing; spatial data",
}
@Article{Ailamaki:2002:DPL,
author = "Anastassia Ailamaki and David J. DeWitt and Mark D.
Hill",
title = "Data page layouts for relational databases on deep
memory hierarchies",
journal = j-VLDB-J,
volume = "11",
number = "3",
pages = "198--215",
month = nov,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0074-9",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:02 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Special issue VLDB best papers 2001.",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011003/20110198.htm;
http://link.springer.de/link/service/journals/00778/papers/2011003/20110198.pdf",
abstract = "Relational database systems have traditionally
optimized for I/O performance and organized records
sequentially on disk pages using the N-ary Storage
Model (NSM) (a.k.a., slotted pages). Recent research,
however, indicates that cache utilization and
performance is becoming increasingly important on
modern platforms. In this paper, we first demonstrate
that in-page data placement is the key to high cache
performance and that NSM exhibits low cache utilization
on modern platforms. Next, we propose a new data
organization model called PAX (Partition Attributes
Across), that significantly improves cache performance
by grouping together all values of each attribute
within each page. Because PAX only affects layout
inside the pages, it incurs no storage penalty and does
not affect I/O behavior. According to our experimental
results (which were obtained without using any indices
on the participating relations), when compared to NSM:
(a) PAX exhibits superior cache and memory bandwidth
utilization, saving at least 75\% of NSM's stall time
due to data cache accesses; (b) range selection queries
and updates on memory-resident relations execute 1725\%
faster; and (c) TPC-H queries involving I/O execute
1148\% faster. Finally, we show that PAX performs well
across different memory system designs.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "cache-conscious database systems; disk page layout;
relational data placement",
}
@Article{Chirkova:2002:FPV,
author = "Rada Chirkova and Alon Y. Halevy and Dan Suciu",
title = "A formal perspective on the view selection problem",
journal = j-VLDB-J,
volume = "11",
number = "3",
pages = "216--237",
month = nov,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0070-0",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:02 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Special issue VLDB best papers 2001.",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011003/20110216.htm;
http://link.springer.de/link/service/journals/00778/papers/2011003/20110216.pdf",
abstract = "The view selection problem is to choose a set of views
to materialize over a database schema, such that the
cost of evaluating a set of workload queries is
minimized and such that the views fit into a
prespecified storage constraint. The two main
applications of the view selection problem are
materializing views in a database to speed up query
processing, and selecting views to materialize in a
data warehouse to answer decision support queries. In
addition, view selection is a core problem for
intelligent data placement over a wide-area network for
data integration applications and data management for
ubiquitous computing. We describe several fundamental
results concerning the view selection problem. We
consider the problem for views and workloads that
consist of equality-selection, project and join
queries, and show that the complexity of the problem
depends crucially on the quality of the estimates that
a query optimizer has on the size of the views it is
considering to materialize. When a query optimizer has
good estimates of the sizes of the views, we show a
somewhat surprising result, namely, that an optimal
choice of views may involve a number of views that is
exponential in the size of the database schema. On the
other hand, when an optimizer uses standard estimation
heuristics, we show that the number of necessary views
and the expression size of each view are polynomially
bounded.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "materialized views; view selection",
}
@Article{Aguilera:2002:VLS,
author = "Vincent Aguilera and Sophie Cluet and Tova Milo and
Pierangelo Veltri and Dan Vodislav",
title = "Views in a large-scale {XML} repository",
journal = j-VLDB-J,
volume = "11",
number = "3",
pages = "238--255",
month = nov,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0065-x",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:02 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Special issue VLDB best papers 2001.",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011003/20110238.htm;
http://link.springer.de/link/service/journals/00778/papers/2011003/20110238.pdf",
abstract = "We are interested in defining and querying views in a
huge and highly heterogeneous XML repository (Web
scale). In this context, view definitions are very
large, involving lots of sources, and there is no
apparent limitation to their size. This raises
interesting problems that we address in the paper: (i)
how to distribute views over several machines without
having a negative impact on the query translation
process; (ii) how to quickly select the relevant part
of a view given a query; (iii) how to minimize the cost
of communicating potentially large queries to the
machines where they will be evaluated. The solution
that we propose is based on a simple view definition
language that allows for automatic generation of views.
The language maps paths in the view abstract DTD to
paths in the concrete source DTDs. It enables a
distributed implementation of the view system that is
scalable both in terms of data and load. In particular,
the query translation algorithm is shown to have a good
(linear) complexity.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "query evaluation; semantic integration; views;
warehouse; XML",
}
@Article{Hunt:2002:DIL,
author = "Ela Hunt and Malcolm P. Atkinson and Robert W.
Irving",
title = "Database indexing for large {DNA} and protein sequence
collections",
journal = j-VLDB-J,
volume = "11",
number = "3",
pages = "256--271",
month = nov,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s007780200064",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:02 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011003.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
note = "Special issue VLDB best papers 2001.",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011003/20110256.htm;
http://link.springer.de/link/service/journals/00778/papers/2011003/20110256.pdf",
abstract = "Our aim is to develop new database technologies for
the approximate matching of unstructured string data
using indexes. We explore the potential of the suffix
tree data structure in this context. We present a new
method of building suffix trees, allowing us to build
trees in excess of RAM size, which has hitherto not
been possible. We show that this method performs in
practice as well as the $O(n)$ method of Ukkonen [70].
Using this method we build indexes for 200 Mb of
protein and 300 Mbp of DNA, whose disk-image exceeds
the available RAM. We show experimentally that suffix
trees can be effectively used in approximate string
matching with biological data. For a range of query
lengths and error bounds the suffix tree reduces the
size of the unoptimised $O(mn)$ dynamic programming
calculation required in the evaluation of string
similarity, and the gain from indexing increases with
index size. In the indexes we built this reduction is
significant, and less than 0.3\% of the expected matrix
is evaluated. We detail the requirements for further
database and algorithmic research to support efficient
use of large suffix indexes in biological
applications.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "approximate matching; biological sequence; database
index; suffix tree",
}
@Article{Halevy:2002:GE,
author = "Alon Y. Halevy",
title = "Guest Editorial",
journal = j-VLDB-J,
volume = "11",
number = "4",
pages = "273--273",
month = dec,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0082-9",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:03 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011004/20110273.htm;
http://link.springer.de/link/service/journals/00778/papers/2011004/20110273.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Jagadish:2002:TNX,
author = "H. V. Jagadish and S. Al-Khalifa and A. Chapman and L.
V. S. Lakshmanan and A. Nierman and S. Paparizos and
J. M. Patel and D. Srivastava and N. Wiwatwattana and
Y. Wu and C. Yu",
title = "{TIMBER}: a native {XML} database",
journal = j-VLDB-J,
volume = "11",
number = "4",
pages = "274--291",
month = dec,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0081-x",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:03 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011004/20110274.htm;
http://link.springer.de/link/service/journals/00778/papers/2011004/20110274.pdf",
abstract = "This paper describes the overall design and
architecture of the Timber XML database system
currently being implemented at the University of
Michigan. The system is based upon a bulk algebra for
manipulating trees, and natively stores XML. New access
methods have been developed to evaluate queries in the
XML context, and new cost estimation and query
optimization techniques have also been developed. We
present performance numbers to support some of our
design decisions. We believe that the key intellectual
contribution of this system is a comprehensive
set-at-a-time query processing ability in a native XML
store, with all the standard components of relational
query processing, including algebraic rewriting and a
cost-based optimizer.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "algebra; document management; hierarchical; query
processing; semi-structured",
}
@Article{Fiebig:2002:ANX,
author = "T. Fiebig and S. Helmer and C.-C. Kanne and G.
Moerkotte and J. Neumann and R. Schiele and T. Westmann",
title = "Anatomy of a native {XML} base management system",
journal = j-VLDB-J,
volume = "11",
number = "4",
pages = "292--314",
month = dec,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0080-y",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:03 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011004/20110292.htm;
http://link.springer.de/link/service/journals/00778/papers/2011004/20110292.pdf",
abstract = "Several alternatives to manage large XML document
collections exist, ranging from file systems over
relational or other database systems to specifically
tailored XML base management systems. In this paper we
give a tour of Natix, a database management system
designed from scratch for storing and processing XML
data. Contrary to the common belief that management of
XML data is just another application for traditional
databases like relational systems, we illustrate how
almost every component in a database system is affected
in terms of adequacy and performance. We show how to
design and optimize areas such as storage, transaction
management --- comprising recovery and multi-user
synchronization --- as well as query processing for
XML.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database; XML",
}
@Article{Amer-Yahia:2002:TPQ,
author = "S. Amer-Yahia and S. Cho and L. V. S. Lakshmanan and
D. Srivastava",
title = "Tree pattern query minimization",
journal = j-VLDB-J,
volume = "11",
number = "4",
pages = "315--331",
month = dec,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0076-7",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:03 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011004/20110315.htm;
http://link.springer.de/link/service/journals/00778/papers/2011004/20110315.pdf",
abstract = "Tree patterns form a natural basis to query
tree-structured data such as XML and LDAP. To improve
the efficiency of tree pattern matching, it is
essential to quickly identify and eliminate redundant
nodes in the pattern. In this paper, we study tree
pattern minimization both in the absence and in the
presence of integrity constraints (ICs) on the
underlying tree-structured database. In the absence of
ICs, we develop a polynomial-time query minimization
algorithm called CIM, whose efficiency stems from two
key properties: (i) a node cannot be redundant unless
its children are; and (ii) the order of elimination of
redundant nodes is immaterial. When ICs are considered
for minimization, we develop a technique for query
minimization based on three fundamental operations:
augmentation (an adaptation of the well-known chase
procedure), minimization (based on homomorphism
techniques), and reduction. We show the surprising
result that the algorithm, referred to as ACIM,
obtained by first augmenting the tree pattern using
ICs, and then applying CIM, always finds the unique
minimal equivalent query. While ACIM is polynomial
time, it can be expensive in practice because of its
inherent non-locality. We then present a fast
algorithm, CDM, that identifies and eliminates local
redundancies due to ICs, based on propagating
``information labels'' up the tree pattern. CDM can be
applied prior to ACIM for improving the minimization
efficiency. We complement our analytical results with
an experimental study that shows the effectiveness of
our tree pattern minimization techniques.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "query minimization; tree patterns; XML",
}
@Article{Chien:2002:ESM,
author = "S.-Y. Chien and V. J. Tsotras and C. Zaniolo",
title = "Efficient schemes for managing multiversion {XML}
documents",
journal = j-VLDB-J,
volume = "11",
number = "4",
pages = "332--353",
month = dec,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0079-4",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:03 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011004/20110332.htm;
http://link.springer.de/link/service/journals/00778/papers/2011004/20110332.pdf",
abstract = "Multiversion support for XML documents is needed in
many critical applications, such as software
configuration control, cooperative authoring, web
information warehouses, and ``e-permanence'' of web
documents. In this paper, we introduce efficient and
robust techniques for: (i) storing and retrieving; (ii)
viewing and exchanging; and (iii) querying multiversion
XML documents. We first discuss the limitations of
traditional version control methods, such as RCS and
SCCS, and then propose novel techniques that overcome
their limitations. Initially, we focus on the problem
of managing secondary storage efficiently, and
introduce an {\em edit-based\/} versioning scheme that
enhances RCS with an effective clustering policy based
on the concept of page-usefulness. The new scheme
drastically improves version retrieval at the expense
of a small (linear) space overhead. However, the
edit-based approach falls short of achieving objectives
(ii) and (iii). Therefore, we introduce and investigate
a second scheme, which is reference-based and preserves
the structure of the original document. In the
reference-based approach, a multiversion document can
be represented as yet another XML document, which can
be easily exchanged and viewed on the web; furthermore,
simple queries are also expressed and supported well
under this representation. To achieve objective (i), we
extend the page-usefulness clustering technique to the
reference-based scheme. After characterizing the
asymptotic behavior of the new techniques proposed, the
paper presents the results of an experimental study
evaluating and comparing their performance.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "historical queries; temporal clustering; temporal
indexing; version management; XML database",
}
@Article{Chan:2002:EFX,
author = "C.-Y. Chan and P. Felber and M. Garofalakis and R.
Rastogi",
title = "Efficient filtering of {XML} documents with {XPath}
expressions",
journal = j-VLDB-J,
volume = "11",
number = "4",
pages = "354--379",
month = dec,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0077-6",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:03 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011004/20110354.htm;
http://link.springer.de/link/service/journals/00778/papers/2011004/20110354.pdf",
abstract = "The publish/subscribe paradigm is a popular model for
allowing publishers (i.e., data generators) to
selectively disseminate data to a large number of
widely dispersed subscribers (i.e., data consumers) who
have registered their interest in specific information
items. Early publish/subscribe systems have typically
relied on simple subscription mechanisms, such as
keyword or ``bag of words'' matching, or simple
comparison predicates on attribute values. The
emergence of XML as a standard for information exchange
on the Internet has led to an increased interest in
using more expressive subscription mechanisms (e.g.,
based on XPath expressions) that exploit both the
structure and the content of published XML documents.
Given the increased complexity of these new
data-filtering mechanisms, the problem of effectively
identifying the subscription profiles that match an
incoming XML document poses a difficult and important
research challenge. In this paper, we propose a novel
index structure, termed XTrie, that supports the
efficient filtering of XML documents based on XPath
expressions. Our XTrie index structure offers several
novel features that, we believe, make it especially
attractive for large-scale publish/subscribe systems.
First, XTrie is designed to support effective filtering
based on complex XPath expressions (as opposed to
simple, single-path specifications). Second, our XTrie
structure and algorithms are designed to support both
ordered and unordered matching of XML data. Third, by
indexing on sequences of elements organized in a trie
structure and using a sophisticated matching algorithm,
XTrie is able to both reduce the number of unnecessary
index probes as well as avoid redundant matchings,
thereby providing extremely efficient filtering. Our
experimental results over a wide range of XML document
and XPath expression workloads demonstrate that our
XTrie index structure outperforms earlier approaches by
wide margins.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data dissemination; document filtering; index
structure; XML; XPath",
}
@Article{Ives:2002:XQE,
author = "Zachary G. Ives and A. Y. Halevy and D. S. Weld",
title = "An {XML} query engine for network-bound data",
journal = j-VLDB-J,
volume = "11",
number = "4",
pages = "380--402",
month = dec,
year = "2002",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0078-5",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:03 MDT 2008",
bibsource = "http://link.springer.de/link/service/journals/00778/tocs/t2011004.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/2011004/20110380.htm;
http://link.springer.de/link/service/journals/00778/papers/2011004/20110380.pdf",
abstract = "XML has become the lingua franca for data exchange and
integration across administrative and enterprise
boundaries. Nearly all data providers are adding XML
import or export capabilities, and standard XML Schemas
and DTDs are being promoted for all types of data
sharing. The ubiquity of XML has removed one of the
major obstacles to integrating data from widely
disparate sources --- namely, the heterogeneity of data
formats. However, general-purpose integration of data
across the wide are a also requires a query processor
that can query data sources on demand, receive streamed
XML data from them, and combine and restructure the
data into new XML output --- while providing good
performance for both batch-oriented and ad hoc,
interactive queries. This is the goal of the Tukwila
data integration system, the first system that focuses
on network-bound, dynamic XML data sources. In contrast
to previous approaches, which must read, parse, and
often store entire XML objects before querying them,
Tukwila can return query results even as the data is
streaming into the system. Tukwila is built with a new
system architecture that extends adaptive query
processing and relational-engine techniques into the
XML realm, as facilitated by a pair of operators that
incrementally evaluate a query's input path expressions
as data is read. In this paper, we describe the Tukwila
architecture and its novel aspects, and we
experimentally demonstrate that Tukwila provides better
overall query performance and faster initial answers
than existing systems, and has excellent scalability.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data integration; data streams; query processing; web
and databases; XML",
}
@Article{Ozsu:2003:NPA,
author = "M. Tamer {\"O}zsu",
title = "New partnership with {ACM} and update on the journal",
journal = j-VLDB-J,
volume = "12",
number = "1",
pages = "1--1",
month = may,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0089-x",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:05 MDT 2008",
bibsource = "http://link.springer-ny.com/link/service/journals/UNKNOWN/tocs/t3012001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/3012001/30120001.htm;
http://link.springer.de/link/service/journals/00778/papers/3012001/30120001.pdf",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Sheth:2003:CRK,
author = "A. Sheth and S. Thacker and S. Patel",
title = "Complex relationships and knowledge discovery support
in the {InfoQuilt} system",
journal = j-VLDB-J,
volume = "12",
number = "1",
pages = "2--27",
month = may,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0071-z",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:05 MDT 2008",
bibsource = "http://link.springer-ny.com/link/service/journals/UNKNOWN/tocs/t3012001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/3012001/30120002.htm;
http://link.springer.de/link/service/journals/00778/papers/3012001/30120002.pdf",
abstract = "Support for semantic content is becoming more common
in Web-accessible information systems. We see this
support emerging with the use of ontologies and
machine-readable, annotated documents. The practice of
domain modeling coupled with the extraction of
domain-specific, contextually relevant metadata also
supports the use of semantics. These advancements
enable knowledge discovery approaches that define
complex relationships between data that is autonomously
collected and managed. The InfoQuilt (One of the
incarnations of the InfoQuilt system, as applied to the
geographic information as part of the NSF Digital
Library II initiative is the ADEPT-UGA system [Ade].
This research was funded in part by National Science
Foundation grant IIS-9817432.) system supports one such
knowledge discovery approach. This paper presents
(parts of) the InfoQuilt system with the focus on its
use for modeling and utilizing complex semantic
inter-domain relationships to enable human-assisted
knowledge discovery over Web-accessible heterogeneous
data. This includes the specification and execution of
Information Scale (IScapes), a semantically rich
information request and correlation mechanism.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Verykios:2003:BDM,
author = "V. S. Verykios and G. V. Moustakides and M. G.
Elfeky",
title = "A {Bayesian} decision model for cost optimal record
matching",
journal = j-VLDB-J,
volume = "12",
number = "1",
pages = "28--40",
month = may,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0072-y",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:05 MDT 2008",
bibsource = "http://link.springer-ny.com/link/service/journals/UNKNOWN/tocs/t3012001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/3012001/30120028.htm;
http://link.springer.de/link/service/journals/00778/papers/3012001/30120028.pdf",
abstract = "In an error-free system with perfectly clean data, the
construction of a global view of the data consists of
linking --- in relational terms, joining --- two or
more tables on their key fields. Unfortunately, most of
the time, these data are neither carefully controlled
for quality nor necessarily defined commonly across
different data sources. As a result, the creation of
such a global data view resorts to approximate joins.
In this paper, an optimal solution is proposed for the
matching or the linking of database record pairs in the
presence of inconsistencies, errors or missing values
in the data. Existing models for record matching rely
on decision rules that minimize the probability of
error, that is the probability that a sample (a
measurement vector) is assigned to the wrong class. In
practice though, minimizing the probability of error is
not the best criterion to design a decision rule
because the misclassifications of different samples may
have different consequences. In this paper we present a
decision model that minimizes the cost of making a
decision. In particular: (a) we present a decision
rule: (b) we prove that this rule is optimal with
respect to the cost of a decision: and (c) we compute
the probabilities of the two types of errors (Type I
and Type II) that incur when this rule is applied. We
also present a closed form decision model for a certain
class of record comparison pairs along with an example,
and results from comparing the proposed cost-based
model to the error-based model, for large record
comparison spaces.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "cost optimal statistical model; data cleaning; record
linkage",
}
@Article{Cui:2003:LTG,
author = "Y. Cui and J. Widom",
title = "Lineage tracing for general data warehouse
transformations",
journal = j-VLDB-J,
volume = "12",
number = "1",
pages = "41--58",
month = may,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0083-8",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:05 MDT 2008",
bibsource = "http://link.springer-ny.com/link/service/journals/UNKNOWN/tocs/t3012001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/3012001/30120041.htm;
http://link.springer.de/link/service/journals/00778/papers/3012001/30120041.pdf",
abstract = "Data warehousing systems integrate information from
operational data sources into a central repository to
enable analysis and mining of the integrated
information. During the integration process, source
data typically undergoes a series of {\em
transformations}, which may vary from simple algebraic
operations or aggregations to complex ``data
cleansing'' procedures. In a warehousing environment,
the {\em data lineage\/} problem is that of tracing
warehouse data items back to the original source items
from which they were derived. We formally define the
lineage tracing problem in the presence of general data
warehouse transformations, and we present algorithms
for lineage tracing in this environment. Our tracing
procedures take advantage of known structure or
properties of transformations when present, but also
work in the absence of such information. Our results
can be used as the basis for a lineage tracing tool in
a general warehousing setting, and also can guide the
design of data warehouses that enable efficient lineage
tracing.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data lineage; data warehouse; inverse; lineage
tracing; transformation",
}
@Article{Medjahed:2003:BBI,
author = "B. Medjahed and B. Benatallah and A. Bouguettaya and
A. H. H. Ngu and A. K. Elmagarmid",
title = "Business-to-business interactions: issues and enabling
technologies",
journal = j-VLDB-J,
volume = "12",
number = "1",
pages = "59--85",
month = may,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0087-z",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:05 MDT 2008",
bibsource = "http://link.springer-ny.com/link/service/journals/UNKNOWN/tocs/t3012001.htm;
http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://link.springer.de/link/service/journals/00778/bibs/3012001/30120059.htm;
http://link.springer.de/link/service/journals/00778/papers/3012001/30120059.pdf",
abstract = "Business-to-Business (B2B) technologies pre-date the
Web. They have existed for at least as long as the
Internet. B2B applications were among the first to take
advantage of advances in computer networking. The
Electronic Data Interchange (EDI) business standard is
an illustration of such an early adoption of the
advances in computer networking. The ubiquity and the
affordability of the Web has made it possible for the
masses of businesses to automate their B2B
interactions. However, several issues related to scale,
content exchange, autonomy, heterogeneity, and other
issues still need to be addressed. In this paper, we
survey the main techniques, systems, products, and
standards for B2B interactions. We propose a set of
criteria for assessing the different B2B interaction
techniques, standards, and products.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "B2B interactions; components; e-commerce; EDI; Web
services; workflows; XML",
}
@Article{Bernstein:2003:GE,
author = "Philip A. Bernstein and Yannis Ioannidis and Raghu
Ramakrishnan",
title = "Guest editorial",
journal = j-VLDB-J,
volume = "12",
number = "2",
pages = "87--88",
month = aug,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0092-2",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:06 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Ramamurthy:2003:CFM,
author = "Ravishankar Ramamurthy and David J. DeWitt and Qi
Su",
title = "A case for fractured mirrors",
journal = j-VLDB-J,
volume = "12",
number = "2",
pages = "89--101",
month = aug,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0093-1",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:06 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The decomposition storage model (DSM) vertically
partitions all attributes of a table and has excellent
I/O behavior when the number of attributes accessed by
a query is small. It also has a better cache footprint
than the standard storage model (NSM) used by most
database systems. However, DSM incurs a high cost in
reconstructing the original tuple from its partitions.
We first revisit some of the performance problems
associated with DSM and suggest a simple indexing
strategy and compare different reconstruction
algorithms. Then we propose a new mirroring scheme,
termed fractured mirrors, using both NSM and DSM
models. This scheme combines the best aspects of both
models, along with the added benefit of mirroring to
better serve an ad hoc query workload. A prototype
system has been built using the Shore storage manager,
and performance is evaluated using queries from the
TPC-H workload.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data placement; disk mirroring; vertical
partitioning",
}
@Article{Chan:2003:RTE,
author = "Chee-Yong Chan and Minos Garofalakis and Rajeev
Rastogi",
title = "{RE}-tree: an efficient index structure for regular
expressions",
journal = j-VLDB-J,
volume = "12",
number = "2",
pages = "102--119",
month = aug,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0094-0",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:06 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Due to their expressive power, regular expressions
(REs) are quickly becoming an integral part of language
specifications for several important application
scenarios. Many of these applications have to manage
huge databases of RE specifications and need to provide
an effective matching mechanism that, given an input
string, quickly identifies the REs in the database that
match it. In this paper, we propose the RE-tree, a
novel index structure for large databases of RE
specifications. Given an input query string, the
RE-tree speeds up the retrieval of matching REs by
focusing the search and comparing the input string with
only a small fraction of REs in the database. Even
though the RE-tree is similar in spirit to other
tree-based structures that have been proposed for
indexing multidimensional data, RE indexing is
significantly more challenging since REs typically
represent infinite sets of strings with no well-defined
notion of spatial locality. To address these new
challenges, our RE-tree index structure relies on novel
measures for comparing the relative sizes of infinite
regular languages. We also propose innovative solutions
for the various RE-tree operations including the
effective splitting of RE-tree nodes and computing a
`tight' bounding RE for a collection of REs. Finally,
we demonstrate how sampling-based approximation
algorithms can be used to significantly speed up the
performance of RE-tree operations. Preliminary
experimental results with moderately large synthetic
data sets indicate that the RE-tree is effective in
pruning the search space and easily outperforms naive
sequential search approaches.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "index structure; regular expressions; sampling-based
approximations; size measures",
}
@Article{Abadi:2003:ANM,
author = "Daniel J. Abadi and Don Carney and Ugur
{\c{C}}etintemel and Mitch Cherniack and Christian
Convey and Sangdon Lee and Michael Stonebraker and
Nesime Tatbul and Stan Zdonik",
title = "{Aurora}: a new model and architecture for data stream
management",
journal = j-VLDB-J,
volume = "12",
number = "2",
pages = "120--139",
month = aug,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0095-z",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:06 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "This paper describes the basic processing model and
architecture of Aurora, a new system to manage data
streams for monitoring applications. Monitoring
applications differ substantially from conventional
business data processing. The fact that a software
system must process and react to continual inputs from
many sources (e.g., sensors) rather than from human
operators requires one to rethink the fundamental
architecture of a DBMS for this application area. In
this paper, we present Aurora, a new DBMS currently
under construction at Brandeis University, Brown
University, and M.I.T. We first provide an overview of
the basic Aurora model and architecture and then
describe in detail a stream-oriented set of
operators.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "continuous queries; data stream management; database
triggers; quality-of-service; real-time systems",
}
@Article{Chandrasekaran:2003:PSS,
author = "Sirish Chandrasekaran and Michael J. Franklin",
title = "{PSoup}: a system for streaming queries over streaming
data",
journal = j-VLDB-J,
volume = "12",
number = "2",
pages = "140--156",
month = aug,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0096-y",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:06 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Recent work on querying data streams has focused on
systems where newly arriving data is processed and
continuously streamed to the user in real time. In many
emerging applications, however, ad hoc queries and/or
intermittent connectivity also require the processing
of data that arrives prior to query submission or
during a period of disconnection. For such
applications, we have developed PSoup, a system that
combines the processing of ad hoc and continuous
queries by treating data and queries symmetrically,
allowing new queries to be applied to old data and new
data to be applied to old queries. PSoup also supports
intermittent connectivity by separating the computation
of query results from the delivery of those results.
PSoup builds on adaptive query-processing techniques
developed in the Telegraph project at UC Berkeley. In
this paper, we describe PSoup and present experiments
that demonstrate the effectiveness of our approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "disconnected operation; query-data duality; stream
query processing",
}
@Article{Agrawal:2003:WRD,
author = "Rakesh Agrawal and Peter J. Haas and Jerry Kiernan",
title = "Watermarking relational data: framework, algorithms
and analysis",
journal = j-VLDB-J,
volume = "12",
number = "2",
pages = "157--169",
month = aug,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0097-x",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:06 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "We enunciate the need for watermarking database
relations to deter data piracy, identify the
characteristics of relational data that pose unique
challenges for watermarking, and delineate desirable
properties of a watermarking system for relational
data. We then present an effective watermarking
technique geared for relational data. This technique
ensures that some bit positions of some of the
attributes of some of the tuples contain specific
values. The specific bit locations and values are
algorithmically determined under the control of a
secret key known only to the owner of the data. This
bit pattern constitutes the watermark. Only if one has
access to the secret key can the watermark be detected
with high probability. Detecting the watermark requires
access neither to the original data nor the watermark,
and the watermark can be easily and efficiently
maintained in the presence of insertions, updates, and
deletions. Our analysis shows that the proposed
technique is robust against various forms of malicious
attacks as well as benign updates to the data. Using an
implementation running on DB2, we also show that the
algorithms perform well enough to be used in real-world
applications.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "database; information hiding; steganography;
watermarking",
}
@Article{Chakrabarti:2003:FAT,
author = "Soumen Chakrabarti and Shourya Roy and Mahesh V.
Soundalgekar",
title = "Fast and accurate text classification via multiple
linear discriminant projections",
journal = j-VLDB-J,
volume = "12",
number = "2",
pages = "170--185",
month = aug,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0098-9",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:06 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Support vector machines (SVMs) have shown superb
performance for text classification tasks. They are
accurate, robust, and quick to apply to test instances.
Their only potential drawback is their training time
and memory requirement. For $n$ training instances held
in memory, the best-known SVM implementations take time
proportional to $n^a$, where $a$ is typically between
1.8 and 2.1. SVMs have been trained on data sets with
several thousand instances, but Web directories today
contain millions of instances that are valuable for
mapping billions of Web pages into Yahoo!-like
directories. We present SIMPL, a nearly linear-time
classification algorithm that mimics the strengths of
SVMs while avoiding the training bottleneck. It uses
Fisher's linear discriminant, a classical tool from
statistical pattern recognition, to project training
instances to a carefully selected low-dimensional
subspace before inducing a decision tree on the
projected instances. SIMPL uses efficient sequential
scans and sorts and is comparable in speed and memory
scalability to widely used naive Bayes (NB)
classifiers, but it beats NB accuracy decisively. It
not only approaches and sometimes exceeds SVM accuracy,
but also beats the running time of a popular SVM
implementation by orders of magnitude. While describing
SIMPL, we make a detailed experimental comparison of
SVM-generated discriminants with Fisher's
discriminants, and we also report on an analysis of the
cache performance of a popular SVM implementation. Our
analysis shows that SIMPL has the potential to be the
method of choice for practitioners who want the
accuracy of SVMs and the simplicity and speed of naive
Bayes classifiers.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "discriminative learning; linear discriminants; text
classification",
}
@Article{Fung:2003:CDV,
author = "Chi-Wai Fung and Kamalakar Karlapalem and Qing Li",
title = "Cost-driven vertical class partitioning for methods in
object oriented databases",
journal = j-VLDB-J,
volume = "12",
number = "3",
pages = "187--210",
month = oct,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0084-7",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:07 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In object-oriented databases (OODBs), a method
encapsulated in a class typically accesses a few, but
not all the instance variables defined in the class. It
may thus be preferable to vertically partition the
class for reducing irrelevant data (instance variables)
accessed by the methods. Our prior work has shown that
vertical class partitioning can result in a substantial
decrease in the total number of disk accesses incurred
for executing a set of applications, but coming up with
an optimal vertical class partitioning scheme is a hard
problem. In this paper, we present two algorithms for
deriving optimal and near-optimal vertical class
partitioning schemes. The cost-driven algorithm
provides the optimal vertical class partitioning
schemes by enumerating, exhaustively, all the schemes
and calculating the number of disk accesses required to
execute a given set of applications. For this, a cost
model for executing a set of methods in an OODB system
is developed. Since exhaustive enumeration is costly
and only works for classes with a small number of
instance variables, a hill-climbing heuristic algorithm
(HCHA) is developed, which takes the solution provided
by the affinity-based algorithm and improves it,
thereby further reducing the total number of disk
accesses incurred. We show that the HCHA algorithm
provides a reasonable near-optimal vertical class
partitioning scheme for executing a given set of
applications.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "affinity-based; analytical cost model; cost-driven;
hill-climbing heuristic algorithm; method-induced;
object-oriented databases; vertical class
partitioning",
}
@Article{Li:2003:CCA,
author = "Chen Li",
title = "Computing complete answers to queries in the presence
of limited access patterns",
journal = j-VLDB-J,
volume = "12",
number = "3",
pages = "211--227",
month = oct,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-002-0085-6",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:07 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In data applications such as information integration,
there can be limited access patterns to relations,
i.e., binding patterns require values to be specified
for certain attributes in order to retrieve data from a
relation. As a consequence, we cannot retrieve all
tuples from these relations. In this article we study
the problem of computing the {\em complete\/} answer to
a query, i.e., the answer that could be computed if all
the tuples could be retrieved. A query is {\em
stable\/} if for any instance of the relations in the
query, its complete answer can be computed using the
access patterns permitted by the relations. We study
the problem of testing stability of various classes of
queries, including conjunctive queries, unions of
conjunctive queries, and conjunctive queries with
arithmetic comparisons. We give algorithms and
complexity results for these classes of queries. We
show that stability of datalog programs is undecidable,
and give a sufficient condition for stability of
datalog queries. Finally, we study data-dependent
computability of the complete answer to a nonstable
query, and propose a decision tree for guiding the
process to compute the complete answer.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "complete answers to queries; limited access patterns
to relations; query stability",
}
@Article{Chua:2003:IBA,
author = "Cecil Eng H. Chua and Roger H. L. Chiang and Ee-Peng
Lim",
title = "Instance-based attribute identification in database
integration",
journal = j-VLDB-J,
volume = "12",
number = "3",
pages = "228--243",
month = oct,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0088-y",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:07 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Most research on attribute identification in database
integration has focused on integrating attributes using
schema and summary information derived from the
attribute values. No research has attempted to fully
explore the use of attribute values to perform
attribute identification. We propose an attribute
identification method that employs schema and summary
instance information as well as properties of
attributes derived from their instances. Unlike other
attribute identification methods that match only single
attributes, our method matches attribute groups for
integration. Because our attribute identification
method fully explores data instances, it can identify
corresponding attributes to be integrated even when
schema information is misleading. Three experiments
were performed to validate our attribute identification
method. In the first experiment, the heuristic rules
derived for attribute classification were evaluated on
119 attributes from nine public domain data sets. The
second was a controlled experiment validating the
robustness of the proposed attribute identification
method by introducing erroneous data. The third
experiment evaluated the proposed attribute
identification method on five data sets extracted from
online music stores. The results demonstrated the
viability of the proposed method.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "attribute identification; database integration;
measures of association",
}
@Article{Helmer:2003:PSF,
author = "Sven Helmer and Guido Moerkotte",
title = "A performance study of four index structures for
set-valued attributes of low cardinality",
journal = j-VLDB-J,
volume = "12",
number = "3",
pages = "244--261",
month = oct,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0106-0",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:07 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The efficient retrieval of data items on set-valued
attributes is an important research topic that has
attracted little attention so far. We studied and
modified four index structures (sequential signature
files, signature trees, extendible signature hashing,
and inverted files) for a fast retrieval of sets with
low cardinality. We compared the index structures by
implementing them and subjecting them to extensive
experiments, investigating the influence of query set
size, database size, domain size, and data distribution
(synthetic and real). The results of the experiments
clearly indicate that inverted files exhibit the best
overall behavior of all tested index structures.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access methods; database management systems; index
structures; physical design; set-valued attributes",
}
@Article{Yang:2003:ICM,
author = "Jun Yang and Jennifer Widom",
title = "Incremental computation and maintenance of temporal
aggregates",
journal = j-VLDB-J,
volume = "12",
number = "3",
pages = "262--283",
month = oct,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0107-z",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:07 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "We consider the problems of computing aggregation
queries in temporal databases and of maintaining
materialized temporal aggregate views efficiently. The
latter problem is particularly challenging since a
single data update can cause aggregate results to
change over the entire time line. We introduce a new
index structure called the {\em SB-tree}, which
incorporates features from both {\em segment-trees\/}
and {\em B-trees}. SB-trees support fast lookup of
aggregate results based on time and can be maintained
efficiently when the data change. We extend the basic
SB-tree index to handle {\em cumulative\/} (also called
{\em moving-window\/}) aggregates, considering
separatelycases when the window size is or is not fixed
in advance. For materialized aggregate views in a
temporal database or warehouse, we propose building and
maintaining SB-tree indices instead of the views
themselves.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access methods; aggregation; B-tree; segment tree;
temporal database; view maintenance",
}
@Article{Atluri:2003:GE,
author = "Vijay Atluri and Anupam Joshi and Yelena Yesha",
title = "Guest editorial",
journal = j-VLDB-J,
volume = "12",
number = "4",
pages = "285--285",
month = nov,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0109-x",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:08 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Maedche:2003:MMD,
author = "A. Maedche and B. Motik and L. Stojanovic",
title = "Managing multiple and distributed ontologies on the
{Semantic Web}",
journal = j-VLDB-J,
volume = "12",
number = "4",
pages = "286--302",
month = nov,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0102-4",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:08 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In traditional software systems, significant attention
is devoted to keeping modules well separated and
coherent with respect to functionality, thus ensuring
that changes in the system are localized to a handful
of modules. Reuse is seen as the key method in reaching
that goal. Ontology-based systems on the Semantic Web
are just a special class of software systems, so the
same principles apply. In this article, we present an
integrated framework for managing multiple and
distributed ontologies on the Semantic Web. It is based
on the representation model for ontologies, trading off
between expressivity and tractability. In our
framework, we provide features for reusing existing
ontologies and for evolving them while retaining the
consistency. The approach is implemented within KAON,
the Karlsruhe Ontology and Semantic Web tool suite.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "multiple and distributed ontologies; ontology
evolution",
}
@Article{Doan:2003:LMO,
author = "AnHai Doan and Jayant Madhavan and Robin Dhamankar and
Pedro Domingos and Alon Halevy",
title = "Learning to match ontologies on the {Semantic Web}",
journal = j-VLDB-J,
volume = "12",
number = "4",
pages = "303--319",
month = nov,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0104-2",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:08 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "On the Semantic Web, data will inevitably come from
many different ontologies, and information processing
across ontologies is not possible without knowing the
semantic mappings between them. Manually finding such
mappings is tedious, error-prone, and clearly not
possible on the Web scale. Hence the development of
tools to assist in the ontology mapping process is
crucial to the success of the Semantic Web. We describe
{\em GLUE}, a system that employs machine learning
techniques to find such mappings. Given two ontologies,
for each concept in one ontology {\em GLUE\/} finds the
most similar concept in the other ontology. We give
well-founded probabilistic definitions to several
practical similarity measures and show that {\em
GLUE\/} can work with all of them. Another key feature
of {\em GLUE\/} is that it uses multiple learning
strategies, each of which exploits well a different
type of information either in the data instances or in
the taxonomic structure of the ontologies. To further
improve matching accuracy, we extend {\em GLUE\/} to
incorporate common-sense knowledge and domain
constraints into the matching process. Our approach is
thus distinguished in that it works with a variety of
well-defined similarity notions and that it efficiently
incorporates multiple types of knowledge. We describe a
set of experiments on several real-world domains and
show that {\em GLUE\/} proposes highly accurate
semantic mappings. Finally, we extend {\em GLUE\/} to
find complex mappings between ontologies and describe
experiments that show the promise of the approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "machine learning; ontology matching; relaxation
labeling; Semantic Web",
}
@Article{Halkidi:2003:TOW,
author = "Maria Halkidi and Benjamin Nguyen and Iraklis Varlamis
and Michalis Vazirgiannis",
title = "{THESUS}: Organizing {Web} document collections based
on link semantics",
journal = j-VLDB-J,
volume = "12",
number = "4",
pages = "320--332",
month = nov,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0100-6",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:08 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The requirements for effective search and management
of the WWW are stronger than ever. Currently Web
documents are classified based on their content not
taking into account the fact that these documents are
connected to each other by links. We claim that a
page's classification is enriched by the detection of
its incoming links' semantics. This would enable
effective browsing and enhance the validity of search
results in the WWW context. Another aspect that is
underaddressed and strictly related to the tasks of
browsing and searching is the similarity of documents
at the semantic level. The above observations lead us
to the adoption of a hierarchy of concepts (ontology)
and a thesaurus to exploit links and provide a better
characterization of Web documents. The enhancement of
document characterization makes operations such as
clustering and labeling very interesting. To this end,
we devised a system called THESUS. The system deals
with an initial sets of Web documents, extracts
keywords from all pages' incoming links, and converts
them to semantics by mapping them to a domain's
ontology. Then a clustering algorithm is applied to
discover groups of Web documents. The effectiveness of
the clustering process is based on the use of a novel
similarity measure between documents characterized by
sets of terms. Web documents are organized into
thematic subsets based on their semantics. The subsets
are then labeled, thereby enabling easier management
(browsing, searching, querying) of the Web. In this
article, we detail the process of this system and give
an experimental analysis of its results.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "document clustering; link analysis; link management;
semantics; similarity measure; World Wide Web",
}
@Article{Medjahed:2003:CWS,
author = "Brahim Medjahed and Athman Bouguettaya and Ahmed K.
Elmagarmid",
title = "Composing {Web} services on the {Semantic Web}",
journal = j-VLDB-J,
volume = "12",
number = "4",
pages = "333--351",
month = nov,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0101-5",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:08 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Service composition is gaining momentum as the
potential {\em silver bullet\/} for the envisioned {\em
Semantic Web}. It purports to take the Web to
unexplored efficiencies and provide a flexible approach
for promoting all types of activities in tomorrow's
Web. Applications expected to heavily take advantage of
Web service composition include B2B E-commerce and
E-government. To date, enabling composite services has
largely been an ad hoc, time-consuming, and error-prone
process involving repetitive low-level programming. In
this paper, we propose an {\em ontology\/}-based
framework for the automatic composition of Web
services. We present a technique to generate composite
services from high-level declarative descriptions. We
define formal safeguards for meaningful composition
through the use of {\em composability\/} rules. These
rules compare the {\em syntactic\/} and {\em
semantic\/} features of Web services to determine
whether two services are composable. We provide an
implementation using an E-government application
offering customized services to indigent citizens.
Finally, we present an exhaustive performance
experiment to assess the scalability of our approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "ontology; Semantic Web; service composition; Web
services",
}
@Article{Fileto:2003:POW,
author = "Renato Fileto and Ling Liu and Calton Pu and Eduardo
Delgado Assad and Claudia Bauzer Medeiros",
title = "{POESIA}: an ontological workflow approach for
composing {Web} services in agriculture",
journal = j-VLDB-J,
volume = "12",
number = "4",
pages = "352--367",
month = nov,
year = "2003",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0103-3",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:08 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "This paper describes the POESIA approach to systematic
composition of Web services. This pragmatic approach is
strongly centered in the use of domain-specific
multidimensional ontologies. Inspired by applications
needs and founded on ontologies, workflows, and
activity models, POESIA provides well-defined
operations (aggregation, specialization, and
instantiation) to support the composition of Web
services. POESIA complements current proposals for Web
services definition and composition by providing a
higher degree of abstraction with verifiable
consistency properties. We illustrate the POESIA
approach using a concrete application scenario in
agroenvironmental planning.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "composition of Web services; data integration;
ontologies; Semantic Web; semantics of data and
processes",
}
@Article{Jensen:2004:MDM,
author = "Christian S. Jensen and Augustas Kligys and Torben
Bach Pedersen and Igor Timko",
title = "Multidimensional data modeling for location-based
services",
journal = j-VLDB-J,
volume = "13",
number = "1",
pages = "1--21",
month = jan,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0091-3",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:09 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "With the recent and continuing advances in areas such
as wireless communications and positioning
technologies, mobile, location-based services are
becoming possible. Such services deliver
location-dependent content to their users. More
specifically, these services may capture the movements
and requests of their users in multidimensional
databases, i.e., data warehouses, and content delivery
may be based on the results of complex queries on these
data warehouses. Such queries aggregate detailed data
in order to find useful patterns, e.g., in the
interaction of a particular user with the services. The
application of multidimensional technology in this
context poses a range of new challenges. The specific
challenge addressed here concerns the provision of an
appropriate multidimensional data model. In particular,
the paper extends an existing multidimensional data
model and algebraic query language to accommodate
spatial values that exhibit partial containment
relationships instead of the total containment
relationships normally assumed in multidimensional data
models. Partial containment introduces imprecision in
aggregation paths. The paper proposes a method for
evaluating the imprecision of such paths. The paper
also offers transformations of dimension hierarchies
with partial containment relationships to simple
hierarchies, to which existing precomputation
techniques are applicable.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data modeling; location-based services;
multidimensional data; partial containment",
}
@Article{Zhang:2004:PMV,
author = "Xin Zhang and Lingli Ding and Elke A. Rundensteiner",
title = "Parallel multisource view maintenance",
journal = j-VLDB-J,
volume = "13",
number = "1",
pages = "22--48",
month = jan,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0086-0",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:09 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In a distributed environment, materialized views are
used to integrate data from different information
sources and then store them in some centralized
location. In order to maintain such materialized views,
maintenance queries need to be sent to information
sources by the data warehouse management system. Due to
the independence of the information sources and the
data warehouse, concurrency issues are raised between
the maintenance queries and the local update
transactions at each information source. Recent
solutions such as ECA and Strobe tackle such concurrent
maintenance, however with the requirement of quiescence
of the information sources. SWEEP and POSSE overcome
this limitation by decomposing the global maintenance
query into smaller subqueries to be sent to every
information source and then performing conflict
correction locally at the data warehouse. Note that all
these previous approaches handle the data updates {\em
one at a time}. Hence either some of the information
sources or the data warehouse is likely to be idle
during most of the maintenance process. In this paper,
we propose that a set of updates should be maintained
in parallel by several concurrent maintenance processes
so that both the information sources as well as the
warehouse would be utilized more fully throughout the
maintenance process. This parallelism should then
improve the overall maintenance performance. For this
we have developed a parallel view maintenance
algorithm, called PVM, that substantially improves upon
the performance of previous maintenance approaches by
handling a set of data updates at the same time. The
parallel handling of a set of updates is orthogonal to
the particular maintenance algorithm applied to the
handling of each individual update. In order to perform
parallel view maintenance, we have identified two
critical issues that must be overcome: (1) detecting
maintenance-concurrent data updates in a parallel mode
and (2) correcting the problem that the data warehouse
commit order may not correspond to the data warehouse
update processing order due to parallel maintenance
handling. In this work, we provide solutions to both
issues. For the former, we insert a middle-layer
timestamp assignment module for detecting
maintenance-concurrent data updates without requiring
any global clock synchronization. For the latter, we
introduce the negative counter concept to solve the
problem of variant orders of committing effects of data
updates to the data warehouse. We provide a proof of
the correctness of PVM that guarantees that our
strategy indeed generates the correct final data
warehouse state. We have implemented both SWEEP and PVM
in our EVE data warehousing system. Our performance
study demonstrates that a manyfold performance
improvement is achieved by PVM over SWEEP.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrent data updates; data warehousing; parallel
view maintenance; performance evaluation",
}
@Article{Hristidis:2004:AAA,
author = "Vagelis Hristidis and Yannis Papakonstantinou",
title = "Algorithms and applications for answering ranked
queries using ranked views",
journal = j-VLDB-J,
volume = "13",
number = "1",
pages = "49--70",
month = jan,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0099-8",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:09 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Ranked queries return the top objects of a database
according to a preference function. We present and
evaluate (experimentally and theoretically) a core
algorithm that answers ranked queries in an efficient
pipelined manner using materialized ranked views. We
use and extend the core algorithm in the described
PREFER and MERGE systems. PREFER precomputes a set of
materialized views that provide guaranteed query
performance. We present an algorithm that selects a
near optimal set of views under space constraints. We
also describe multiple optimizations and implementation
aspects of the downloadable version of PREFER. Then we
discuss MERGE, which operates at a metabroker and
answers ranked queries by retrieving a minimal number
of objects from sources that offer ranked queries. A
speculative version of the pipelining algorithm is
described.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "materialization; merge ranked views; ranked queries",
}
@Article{Khan:2004:REO,
author = "Latifur Khan and Dennis McLeod and Eduard Hovy",
title = "Retrieval effectiveness of an ontology-based model for
information selection",
journal = j-VLDB-J,
volume = "13",
number = "1",
pages = "71--85",
month = jan,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0105-1",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:09 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Technology in the field of digital media generates
huge amounts of nontextual information, audio, video,
and images, along with more familiar textual
information. The potential for exchange and retrieval
of information is vast and daunting. The key problem in
achieving efficient and user-friendly retrieval is the
development of a search mechanism to guarantee delivery
of minimal irrelevant information (high precision)
while insuring relevant information is not overlooked
(high recall). The traditional solution employs
keyword-based search. The only documents retrieved are
those containing user-specified keywords. But many
documents convey desired semantic information without
containing these keywords. This limitation is
frequently addressed through query expansion mechanisms
based on the statistical co-occurrence of terms. Recall
is increased, but at the expense of deteriorating
precision. One can overcome this problem by indexing
documents according to context and meaning rather than
keywords, although this requires a method of converting
words to meanings and the creation of a meaning-based
index structure. We have solved the problem of an index
structure through the design and implementation of a
concept-based model using domain-dependent ontologies.
An ontology is a collection of concepts and their
interrelationships that provide an abstract view of an
application domain. With regard to converting words to
meaning, the key issue is to identify appropriate
concepts that both describe and identify documents as
well as language employed in user requests. This paper
describes an automatic mechanism for selecting these
concepts. An important novelty is a scalable
disambiguation algorithm that prunes irrelevant
concepts and allows relevant ones to associate with
documents and participate in query generation. We also
propose an automatic query expansion mechanism that
deals with user requests expressed in natural language.
This mechanism generates database queries with
appropriate and relevant expansion through knowledge
encoded in ontology form. Focusing on audio data, we
have constructed a demonstration prototype. We have
experimentally and analytically shown that our model,
compared to keyword search, achieves a significantly
higher degree of precision and recall. The techniques
employed can be applied to the problem of information
selection in all media types.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "audio; metadata; ontology; precision; recall; SQL",
}
@Article{Donderler:2004:RBS,
author = "Mehmet Emin D{\"o}nderler and {\"O}zg{\"u}r Ulusoy and
Ugur G{\"u}d{\"u}kbay",
title = "Rule-based spatiotemporal query processing for video
databases",
journal = j-VLDB-J,
volume = "13",
number = "1",
pages = "86--103",
month = jan,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0114-0",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:09 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In our earlier work, we proposed an architecture for a
Web-based video database management system (VDBMS)
providing an integrated support for spatiotemporal and
semantic queries. In this paper, we focus on the task
of spatiotemporal query processing and also propose an
SQL-like video query language that has the capability
to handle a broad range of spatiotemporal queries. The
language is rule-based in that it allows users to
express spatial conditions in terms of Prolog-type
predicates. Spatiotemporal query processing is carried
out in three main stages: query recognition, query
decomposition, and query execution.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "content-based retrieval; inference rules; multimedia
databases; spatiotemporal query processing; video
databases",
}
@Article{Yu:2004:QHD,
author = "Cui Yu and St{\'e}phane Bressan and Beng Chin Ooi and
Kian-Lee Tan",
title = "Querying high-dimensional data in single-dimensional
space",
journal = j-VLDB-J,
volume = "13",
number = "2",
pages = "105--119",
month = may,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0121-9",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:10 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In this paper, we propose a new tunable index scheme,
called iMinMax($\theta$), that maps points in
high-dimensional spaces to single-dimensional values
determined by their maximum or minimum values among all
dimensions. By varying the tuning ``knob'', $\theta$,
we can obtain different families of iMinMax structures
that are optimized for different distributions of data
sets. The transformed data can then be indexed using
existing single-dimensional indexing structures such as
the B$^+$-trees. Queries in the high-dimensional space
have to be transformed into queries in the
single-dimensional space and evaluated there. We
present efficient algorithms for evaluating window
queries as range queries on the single-dimensional
space. We conducted an extensive performance study to
evaluate the effectiveness of the proposed schemes. Our
results show that iMinMax($\theta$) outperforms
existing techniques, including the Pyramid scheme and
VA-file, by a wide margin. We then describe how iMinMax
could be used in approximate K-nearest neighbor (KNN)
search, and we present a comparative study against the
recently proposed iDistance, a specialized KNN indexing
method.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "edge; high-dimensional data; iMinMax($\theta$);
single-dimensional space; window and KNN queries",
}
@Article{Dori:2004:VVS,
author = "Dov Dori",
title = "{ViSWeb} --- the {Visual Semantic Web}: unifying human
and machine knowledge representations with
{Object-Process Methodology}",
journal = j-VLDB-J,
volume = "13",
number = "2",
pages = "120--147",
month = may,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0120-x",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:10 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The Visual Semantic Web (ViSWeb) is a new paradigm for
enhancing the current Semantic Web technology. Based on
Object-Process Methodology (OPM), which enables
modeling of systems in a single graphic and textual
model, ViSWeb provides for representation of knowledge
over the Web in a unified way that caters to human
perceptions while also being machine processable. The
advantages of the ViSWeb approach include equivalent
graphic-text knowledge representation, visual
navigability, semantic sentence interpretation,
specification of system dynamics, and complexity
management. Arguing against the claim that humans and
machines need to look at different knowledge
representation formats, the principles and basics of
various graphic and textual knowledge representations
are presented and examined as candidates for ViSWeb
foundation. Since OPM is shown to be most adequate for
the task, ViSWeb is developed as an OPM-based layer on
top of XML/RDF/OWL to express knowledge visually and in
natural language. Both the graphic and the textual
representations are strictly equivalent. Being
intuitive yet formal, they are not only understandable
to humans but are also amenable to computer processing.
The ability to use such bimodal knowledge
representation is potentially a major step forward in
the evolution of the Semantic Web.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "conceptual graphs; knowledge representation;
object-process methodology; Semantic Web; Visual
Semantic Web",
}
@Article{Fu:2004:EHA,
author = "Lixin Fu and Sanguthevar Rajasekaran",
title = "Evaluating holistic aggregators efficiently for very
large datasets",
journal = j-VLDB-J,
volume = "13",
number = "2",
pages = "148--161",
month = may,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0112-2",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:10 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In data warehousing applications, numerous OLAP
queries involve the processing of holistic aggregators
such as computing the ``top $n$,'' median, quantiles,
etc. In this paper, we present a novel approach called
dynamic bucketing to efficiently evaluate these
aggregators. We partition data into equiwidth buckets
and further partition dense buckets into subbuckets as
needed by allocating and reclaiming memory space. The
bucketing process dynamically adapts to the input order
and distribution of input datasets. The histograms of
the buckets and subbuckets are stored in our new data
structure called structure trees. A recent selection
algorithm based on regular sampling is generalized and
its analysis extended. We have also compared our new
algorithms with this generalized algorithm and several
other recent algorithms. Experimental results show that
our new algorithms significantly outperform prior ones
not only in the runtime but also in accuracy.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "aggregation; dynamic bucketing; quantiles",
}
@Article{Rahal:2004:ETU,
author = "Amira Rahal and Qiang Zhu and Per-{\AA}ke Larson",
title = "Evolutionary techniques for updating query cost models
in a dynamic multidatabase environment",
journal = j-VLDB-J,
volume = "13",
number = "2",
pages = "162--176",
month = may,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0110-4",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:10 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Deriving local cost models for query optimization in a
dynamic multidatabase system (MDBS) is a challenging
issue. In this paper, we study how to evolve a query
cost model to capture a slowly-changing dynamic MDBS
environment so that the cost model is kept up-to-date
all the time. Two novel evolutionary techniques, i.e.,
the shifting method and the block-moving method, are
proposed. The former updates a cost model by taking
up-to-date information from a new sample query into
consideration at each step, while the latter considers
a block (batch) of new sample queries at each step. The
relevant issues, including derivation of recurrence
updating formulas, development of efficient algorithms,
analysis and comparison of complexities, and design of
an integrated scheme to apply the two methods
adaptively, are studied. Our theoretical and
experimental results demonstrate that the proposed
techniques are quite promising in maintaining accurate
cost models efficiently for a slowly changing dynamic
MDBS environment. Besides the application to MDBSs, the
proposed techniques can also be applied to the
automatic maintenance of cost models in self-managing
database systems.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "cost model; evolutionary technique; multidatabase;
query optimization; self-managing database",
}
@Article{Adi:2004:ASM,
author = "Asaf Adi and Opher Etzion",
title = "{Amit} --- the situation manager",
journal = j-VLDB-J,
volume = "13",
number = "2",
pages = "177--203",
month = may,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-003-0108-y",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:10 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "This paper presents the ``situation manager'', a tool
that includes both a language and an efficient runtime
execution mechanism aimed at reducing the complexity of
active applications. This tool follows the observation
that in many cases there is a gap between current tools
that enable one to react to a single event (following
the ECA: event-condition-action paradigm) and the
reality in which a single event may not require any
reaction; however, the reaction should be given to
patterns over the event history. The concept of
presented in this paper extends the concept of in its
expressive power, flexibility, and usability. This
paper motivates the work, surveys other efforts in this
area, and discusses both the language and the execution
model.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "active databases; active technology; composite events;
high-level languages",
}
@Article{Freytag:2004:BPV,
author = "Johann-Christoph Freytag and Serge Abiteboul and Mike
Carey",
title = "Best papers of {VLDB} 2003",
journal = j-VLDB-J,
volume = "13",
number = "3",
pages = "205--206",
month = sep,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0129-1",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Ilyas:2004:STJ,
author = "Ihab F. Ilyas and Walid G. Aref and Ahmed K.
Elmagarmid",
title = "Supporting top-$k$ join queries in relational
databases",
journal = j-VLDB-J,
volume = "13",
number = "3",
pages = "207--221",
month = sep,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0128-2",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Ranking queries, also known as top-$k$ queries,
produce results that are ordered on some computed
score. Typically, these queries involve joins, where
users are usually interested only in the top-$k$ join
results. Top-$k$ queries are dominant in many emerging
applications, e.g., multimedia retrieval by content,
Web databases, data mining, middlewares, and most
information retrieval applications. Current relational
query processors do not handle ranking queries
efficiently, especially when joins are involved. In
this paper, we address supporting top-$k$ join queries
in relational query processors. We introduce a new
rank-join algorithm that makes use of the individual
orders of its inputs to produce join results ordered on
a user-specified scoring function. The idea is to rank
the join results progressively during the join
operation. We introduce two physical query operators
based on variants of ripple join that implement the
rank-join algorithm. The operators are nonblocking and
can be integrated into pipelined execution plans. We
also propose an efficient heuristic designed to
optimize a top-$k$ join query by choosing the best join
order. We address several practical issues and
optimization heuristics to integrate the new join
operators in practical query processors. We implement
the new operators inside a prototype database engine
based on PREDATOR. The experimental evaluation of our
approach compares recent algorithms for joining ranked
inputs and shows superior performance.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "query operators; rank aggregarion; ranking; top-$k$
queries",
}
@Article{Papadimitriou:2004:AUS,
author = "Spiros Papadimitriou and Anthony Brockwell and
Christos Faloutsos",
title = "Adaptive, unsupervised stream mining",
journal = j-VLDB-J,
volume = "13",
number = "3",
pages = "222--239",
month = sep,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0130-8",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Sensor devices and embedded processors are becoming
widespread, especially in measurement/monitoring
applications. Their limited resources (CPU, memory
and/or communication bandwidth, and power) pose some
interesting challenges. We need concise, expressive
models to represent the important features of the data
and that lend themselves to efficient estimation. In
particular, under these severe constraints, we want
models and estimation methods that (a) require little
memory and a single pass over the data, (b) can adapt
and handle arbitrary periodic components, and (c) can
deal with various types of noise. We propose
${\mathrm{AWSOM}}$ (Arbitrary Window Stream mOdeling
Method), which allows sensors in remote or hostile
environments to efficiently and effectively discover
interesting patterns and trends. This can be done
automatically, i.e., with no prior inspection of the
data or any user intervention and expert tuning before
or during data gathering. Our algorithms require
limited resources and can thus be incorporated into
sensors --- possibly alongside a distributed query
processing engine [10,6,27]. Updates are performed in
constant time with respect to stream size using
logarithmic space. Existing forecasting methods
(SARIMA, GARCH, etc.) and ``traditional'' Fourier and
wavelet analysis fall short on one or more of these
requirements. To the best of our knowledge,
${\mathrm{AWSOM}}$ is the first framework that combines
all of the above characteristics. Experiments on real
and synthetic datasets demonstrate that
${\mathrm{AWSOM}}$ discovers meaningful patterns over
long time periods. Thus, the patterns can also be used
to make long-range forecasts, which are notoriously
difficult to perform. In fact, ${\mathrm{AWSOM}}$
outperforms manually set up autoregressive models, both
in terms of long-term pattern detection and modeling
and by at least $10 \times$ in resource consumption.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Labrinidis:2004:ETB,
author = "Alexandros Labrinidis and Nick Roussopoulos",
title = "Exploring the tradeoff between performance and data
freshness in database-driven {Web} servers",
journal = j-VLDB-J,
volume = "13",
number = "3",
pages = "240--255",
month = sep,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0131-7",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Personalization, advertising, and the sheer volume of
online data generate a staggering amount of dynamic Web
content. In addition to Web caching, view
materialization has been shown to accelerate the
generation of dynamic Web content. View materialization
is an attractive solution as it decouples the serving
of access requests from the handling of updates. In the
context of the Web, selecting which views to
materialize must be decided online and needs to
consider both performance and data freshness, which we
refer to as the online view selection problem. In this
paper, we define data freshness metrics, provide an
adaptive algorithm for the online view selection
problem that is based on user-specified data freshness
requirements, and present experimental results.
Furthermore, we examine alternative metrics for data
freshness and extend our proposed algorithm to handle
multiple users and alternative definitions of data
freshness.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{He:2004:AIW,
author = "Hai He and Weiyi Meng and Clement Yu and Zonghuan
Wu",
title = "Automatic integration of {Web} search interfaces with
{WISE}-Integrator",
journal = j-VLDB-J,
volume = "13",
number = "3",
pages = "256--273",
month = sep,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0126-4",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "An increasing number of databases are becoming Web
accessible through form-based search interfaces, and
many of these sources are database-driven e-commerce
sites. It is a daunting task for users to access
numerous Web sites individually to get the desired
information. Hence, providing a unified access to
multiple e-commerce search engines selling similar
products is of great importance in allowing users to
search and compare products from multiple sites with
ease. One key task for providing such a capability is
to integrate the Web search interfaces of these
e-commerce search engines so that user queries can be
submitted against the integrated interface. Currently,
integrating such search interfaces is carried out
either manually or semiautomatically, which is
inefficient and difficult to maintain. In this paper,
we present WISE-Integrator --- a tool that performs
automatic integration of Web Interfaces of Search
Engines. WISE-Integrator explores a rich set of special
metainformation that exists in Web search interfaces
and uses the information to identify matching
attributes from different search interfaces for
integration. It also resolves domain differences of
matching attributes. In this paper, we also discuss how
to automatically extract information from search
interfaces that is needed by WISE-Integrator to perform
automatic interface integration. Our experimental
results, based on 143 real-world search interfaces in
four different domains, indicate that WISE-Integrator
can achieve high attribute matching accuracy and can
produce high-quality integrated search interfaces
without human interactions.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "attribute matching; interface extraction; metasearch;
schema integration; Web search interface integration",
}
@Article{Velegrakis:2004:PMC,
author = "Yannis Velegrakis and Ren{\'e} J. Miller and Lucian
Popa",
title = "Preserving mapping consistency under schema changes",
journal = j-VLDB-J,
volume = "13",
number = "3",
pages = "274--293",
month = sep,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0136-2",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In dynamic environments like the Web, data sources may
change not only their data but also their schemas,
their semantics, and their query capabilities. When a
mapping is left inconsistent by a schema change, it has
to be detected and updated. We present a novel
framework and a tool (ToMAS) for automatically adapting
(rewriting) mappings as schemas evolve. Our approach
considers not only local changes to a schema but also
changes that may affect and transform many components
of a schema. Our algorithm detects mappings affected by
structural or constraint changes and generates all the
rewritings that are consistent with the semantics of
the changed schemas. Our approach explicitly models
mapping choices made by a user and maintains these
choices, whenever possible, as the schemas and mappings
evolve. When there is more than one candidate
rewriting, the algorithm may rank them based on how
close they are to the semantics of the existing
mappings.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Florescu:2004:BSX,
author = "Daniela Florescu and Chris Hillery and Donald Kossmann
and Paul Lucas and Fabio Riccardi and Till Westmann and
J. Carey and Arvind Sundararajan",
title = "The {BEA} streaming {XQuery} processor",
journal = j-VLDB-J,
volume = "13",
number = "3",
pages = "294--315",
month = sep,
year = "2004",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-004-0137-1",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:11 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "This paper describes the design, implementation, and
performance characteristics of a commercial XQuery
processing engine, the BEA streaming XQuery processor.
This XQuery engine was designed to provide high
performance for message-processing applications, i.e.,
for transforming XML data streams. The engine is a
central component of the 8.1 release of BEA's
WebLogic Integration (WLI) product. The BEA XQuery
engine is fully compliant with the August 2002 draft of
the W3C XML Query Language specification and we are
currently porting it to the latest version of the
XQuery language (July 2004). A goal of this paper is to
describe how a fully compliant yet efficient XQuery
engine has been built from a few relatively simple
components and well-understood technologies.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Gehrke:2004:GES,
author = "Johannes Gehrke and M. Hellerstein",
title = "{Guest Editorial} to the special issue on data stream
processing",
journal = j-VLDB-J,
volume = "13",
number = "4",
pages = "317--317",
month = dec,
year = "2004",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:12 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Yang:2004:FHQ,
author = "Huai Yang and Li Lee and Wynne Hsu",
title = "Finding hot query patterns over an {XQuery} stream",
journal = j-VLDB-J,
volume = "13",
number = "4",
pages = "318--332",
month = dec,
year = "2004",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:12 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Caching query results is one efficient approach to
improving the performance of XML management systems.
This entails the discovery of frequent XML queries
issued by users. In this paper, we model user queries
as a stream of XML query pattern trees and mine the
frequent query patterns over the query stream. To
facilitate the one-pass mining process, we devise a
novel data structure called DTS to summarize the
pattern trees seen so far. By grouping the incoming
pattern trees into batches, we can dynamically mark the
active portion of the current batch in DTS and limit
the enumeration of candidate trees to only the
currently active pattern trees. We also design another
summary data structure called ECTree that provides for
the incremental computation of the frequent tree
patterns over the query stream. Based on the above two
constructs, we present two mining algorithms called
XQSMinerI and XQSMinerII. XQSMinerI is fast, but it
tends to overestimate, while XQSMinerII adopts a
filter-and-refine approach to minimize the amount of
overestimation. Experimental results show that the
proposed methods are both efficient and scalable and
require only small memory footprints.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "frequent pattern mining; pattern tree; stream mining;
tree mining; XML query pattern",
}
@Article{Babcock:2004:OSD,
author = "Brian Babcock and Shivnath Babu and Mayur Datar and
Rajeev Motwani and Dilys Thomas",
title = "Operator scheduling in data stream systems",
journal = j-VLDB-J,
volume = "13",
number = "4",
pages = "333--353",
month = dec,
year = "2004",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:12 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In many applications involving continuous data
streams, data arrival is bursty and data rate
fluctuates over time. Systems that seek to give rapid
or real-time query responses in such an environment
must be prepared to deal gracefully with bursts in data
arrival without compromising system performance. We
discuss one strategy for processing bursty streams ---
adaptive, load-aware scheduling of query operators to
minimize resource consumption during times of peak
load. We show that the choice of an operator scheduling
strategy can have significant impact on the runtime
system memory usage as well as output latency. Our aim
is to design a scheduling strategy that minimizes the
maximum runtime system memory while maintaining the
output latency within prespecified bounds. We first
present Chain scheduling, an operator scheduling
strategy for data stream systems that is near-optimal
in minimizing runtime memory usage for any collection
of single-stream queries involving selections,
projections, and foreign-key joins with stored
relations. Chain scheduling also performs well for
queries with sliding-window joins over multiple streams
and multiple queries of the above types. However,
during bursts in input streams, when there is a buildup
of unprocessed tuples, Chain scheduling may lead to
high output latency. We study the online problem of
minimizing maximum runtime memory, subject to a
constraint on maximum latency. We present preliminary
observations, negative results, and heuristics for this
problem. A thorough experimental evaluation is provided
where we demonstrate the potential benefits of Chain
scheduling and its different variants, compare it with
competing scheduling strategies, and validate our
analytical conclusions.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data streams; latency; memory management; scheduling",
}
@Article{Ganguly:2004:TSE,
author = "Sumit Ganguly and Minos Garofalakis and Rajeev
Rastogi",
title = "Tracking set-expression cardinalities over continuous
update streams",
journal = j-VLDB-J,
volume = "13",
number = "4",
pages = "354--369",
month = dec,
year = "2004",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:12 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "There is growing interest in algorithms for processing
and querying continuous data streams (i.e., data seen
only once in a fixed order) with limited memory
resources. In its most general form, a data stream is
actually an update stream, i.e., comprising data-item
deletions as well as insertions. Such massive update
streams arise naturally in several application domains
(e.g., monitoring of large IP network installations or
processing of retail-chain transactions). Estimating
the cardinality of set expressions defined over several
(possibly distributed) update streams is perhaps one of
the most fundamental query classes of interest; as an
example, such a query may ask ``what is the number of
distinct IP source addresses seen in passing packets
from both router $R_1$ and $R_2$ but not router
$R_3$?''. Earlier work only addressed very restricted
forms of this problem, focusing solely on the special
case of insert-only streams and specific operators
(e.g., union). In this paper, we propose the first
space-efficient algorithmic solution for estimating the
cardinality of full-fledged set expressions over
general update streams. Our estimation algorithms are
probabilistic in nature and rely on a novel, hash-based
synopsis data structure, termed ''2-level hash
sketch''. We demonstrate how our 2-level hash sketch
synopses can be used to provide low-error,
high-confidence estimates for the cardinality of set
expressions (including operators such as set union,
intersection, and difference) over continuous update
streams, using only space that is significantly
sublinear in the sizes of the streaming input
(multi-)sets. Furthermore, our estimators never require
rescanning or resampling of past stream items,
regardless of the number of deletions in the stream. We
also present lower bounds for the problem,
demonstrating that the space usage of our estimation
algorithms is within small factors of the optimal.
Finally, we propose an optimized, time-efficient stream
synopsis (based on 2-level hash sketches) that provides
similar, strong accuracy-space guarantees while
requiring only guaranteed logarithmic maintenance time
per update, thus making our methods applicable for
truly rapid-rate data streams. Our results from an
empirical study of our synopsis and estimation
techniques verify the effectiveness of our approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "approximate query processing; data streams; data
synopses; randomized algorithms; set expressions",
}
@Article{Balakrishnan:2004:RA,
author = "Hari Balakrishnan and Magdalena Balazinska and Don
Carney and U{\=g}ur {\c{C}}etintemel and Mitch
Cherniack and Christian Convey and Eddie Galvez and Jon
Salz and Michael Stonebraker and Nesime Tatbul and
Richard Tibbetts and Stan Zdonik",
title = "Retrospective on {Aurora}",
journal = j-VLDB-J,
volume = "13",
number = "4",
pages = "370--383",
month = dec,
year = "2004",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:12 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "This experience paper summarizes the key lessons we
learned throughout the design and implementation of the
Aurora stream-processing engine. For the past 2 years,
we have built five stream-based applications using
Aurora. We first describe in detail these applications
and their implementation in Aurora. We then reflect on
the design of Aurora based on this experience. Finally,
we discuss our initial ideas on a follow-on project,
called Borealis, whose goal is to eliminate the
limitations of Aurora as well as to address new key
challenges and applications in the stream-processing
domain.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data stream management; distributed stream processing;
monitoring applications; quality-of-service;
stream-processing engines",
}
@Article{Sharaf:2004:BEE,
author = "A. Sharaf and Jonathan Beaver and Alexandros
Labrinidis and K. Chrysanthis",
title = "Balancing energy efficiency and quality of aggregate
data in sensor networks",
journal = j-VLDB-J,
volume = "13",
number = "4",
pages = "384--403",
month = dec,
year = "2004",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:12 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In-network aggregation has been proposed as one method
for reducing energy consumption in sensor networks. In
this paper, we explore two ideas related to further
reducing energy consumption in the context of
in-network aggregation. The first is by influencing the
construction of the routing trees for sensor networks
with the goal of reducing the size of transmitted data.
To this end, we propose a group-aware network
configuration method that ``clusters'' along the same
path sensor nodes that belong to the same group. The
second idea involves imposing a hierarchy of output
filters on the sensor network with the goal of both
reducing the size of transmitted data and minimizing
the number of transmitted messages. More specifically,
we propose a framework to use temporal coherency
tolerances in conjunction with in-network aggregation
to save energy at the sensor nodes while maintaining
specified quality of data. These tolerances are based
on user preferences or can be dictated by the network
in cases where the network cannot support the current
tolerance level. Our framework, called TiNA, works on
top of existing in-network aggregation schemes. We
evaluate experimentally our proposed schemes in the
context of existing in-network aggregation schemes. We
present experimental results measuring energy
consumption, response time, and quality of data for
Group-By queries. Overall, our schemes provide
significant energy savings with respect to
communication and a negligible drop in quality of
data.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "in-network query processing; power-aware computing;
semantic routing; sensor networks",
}
@Article{Ozsu:2005:E,
author = "Tamer {\"O}zsu",
title = "Editorial",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "1--1",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Gao:2005:JOT,
author = "Dengfeng Gao and S. Jensen and T. Snodgrass and D.
Soo",
title = "Join operations in temporal databases",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "2--29",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Joins are arguably the most important relational
operators. Poor implementations are tantamount to
computing the Cartesian product of the input relations.
In a temporal database, the problem is more acute for
two reasons. First, conventional techniques are
designed for the evaluation of joins with equality
predicates rather than the inequality predicates
prevalent in valid-time queries. Second, the presence
of temporally varying data dramatically increases the
size of a database. These factors indicate that
specialized techniques are needed to efficiently
evaluate temporal joins. We address this need for
efficient join evaluation in temporal databases. Our
purpose is twofold. We first survey all previously
proposed temporal join operators. While many temporal
join operators have been defined in previous work, this
work has been done largely in isolation from competing
proposals, with little, if any, comparison of the
various operators. We then address evaluation
algorithms, comparing the applicability of various
algorithms to the temporal join operators and
describing a performance study involving algorithms for
one important operator, the temporal equijoin. Our
focus, with respect to implementation, is on
non-index-based join algorithms. Such algorithms do not
rely on auxiliary access paths but may exploit sort
orderings to achieve efficiency.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "attribute skew; interval join; partition join;
sort-merge join; temporal Cartesian product; temporal
join; timestamp skew",
}
@Article{Balmin:2005:SQX,
author = "Andrey Balmin and Yannis Papakonstantinou",
title = "Storing and querying {XML} data using denormalized
relational databases",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "30--49",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "XML database systems emerge as a result of the
acceptance of the XML data model. Recent works have
followed the promising approach of building XML
database management systems on underlying
RDBMS's. Achieving query processing performance
reduces to two questions: (i) How should the XML data
be decomposed into data that are stored in the RDBMS?
(ii) How should the XML query be translated into an
efficient plan that sends one or more SQL queries to
the underlying RDBMS and combines the data into the XML
result? We provide a formal framework for XML
Schema-driven decompositions, which encompasses the
decompositions proposed in prior work and extends them
with decompositions that employ denormalized tables and
binary-coded XML fragments. We provide corresponding
query processing algorithms that translate the XML
query conditions into conditions on the relational
tables and assemble the decomposed data into the XML
query result. Our key performance focus is the response
time for delivering the first results of a query. The
most effective of the described decompositions have
been implemented in XCacheDB, an XML DBMS built on top
of a commercial RDBMS, which serves as our experimental
basis. We present experiments and analysis that point
to a class of decompositions, called inlined
decompositions, that improve query performance for full
results and first results, without significant increase
in the size of the database.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Gal:2005:FME,
author = "Avigdor Gal and Ateret Anaby-Tavor and Alberto
Trombetta and Danilo Montesi",
title = "A framework for modeling and evaluating automatic
semantic reconciliation",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "50--67",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The introduction of the Semantic Web vision and the
shift toward machine understandable Web resources has
unearthed the importance of automatic semantic
reconciliation. Consequently, new tools for automating
the process were proposed. In this work we present a
formal model of semantic reconciliation and analyze in
a systematic manner the properties of the process
outcome, primarily the inherent uncertainty of the
matching process and how it reflects on the resulting
mappings. An important feature of this research is the
identification and analysis of factors that impact the
effectiveness of algorithms for automatic semantic
reconciliation, leading, it is hoped, to the design of
better algorithms by reducing the uncertainty of
existing algorithms. Against this background we
empirically study the aptitude of two algorithms to
correctly match concepts. This research is both timely
and practical in light of recent attempts to develop
and utilize methods for automatic semantic
reconciliation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "mapping; ontology versioning; semantic
interoperability",
}
@Article{Halevy:2005:SML,
author = "Y. Halevy and G. Ives and Dan Suciu and Igor
Tatarinov",
title = "Schema mediation for large-scale semantic data
sharing",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "68--83",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Intuitively, data management and data integration
tools should be well suited for exchanging information
in a semantically meaningful way. Unfortunately, they
suffer from two significant problems: they typically
require a common and comprehensive schema design before
they can be used to store or share information, and
they are difficult to extend because schema evolution
is heavyweight and may break backward compatibility. As
a result, many large-scale data sharing tasks are more
easily facilitated by non-database-oriented tools that
have little support for semantics. The goal of the peer
data management system (PDMS) is to address this need:
we propose the use of a decentralized, easily
extensible data management architecture in which any
user can contribute new data, schema information, or
even mappings between other peers' schemas.
PDMSs represent a natural step beyond data integration
systems, replacing their single logical schema with an
interlinked collection of semantic mappings between
peers' individual schemas. This paper considers
the problem of schema mediation in a PDMS. Our first
contribution is a flexible language for mediating
between peer schemas that extends known data
integration formalisms to our more complex
architecture. We precisely characterize the complexity
of query answering for our language. Next, we describe
a reformulation algorithm for our language that
generalizes both global-as-view and local-as-view query
answering algorithms. Then we describe several methods
for optimizing the reformulation algorithm and an
initial set of experiments studying its performance.
Finally, we define and consider several {\em global\/}
problems in managing semantic mappings in a PDMS.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data integration; peer data management; schema
mediation; Web and databases",
}
@Article{Benatallah:2005:AWS,
author = "Boualem Benatallah and Mohand-Said Hacid and Alain
Leger and Christophe Rey and Farouk Toumani",
title = "On automating {Web} services discovery",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "84--96",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "One of the challenging problems that Web service
technology faces is the ability to effectively discover
services based on their capabilities. We present an
approach to tackling this problem in the context of
description logics (DLs). We formalize service
discovery as a new instance of the problem of rewriting
concepts using terminologies. We call this new instance
the {\em best covering problem}. We provide a
formalization of the {\em best covering problem\/} in
the framework of DL-based ontologies and propose a
hypergraph-based algorithm to effectively compute best
covers of a given request. We propose a novel
matchmaking algorithm that takes as input a service
request (or query) $Q$ and an ontology $\mathcal{T}$ of
services and finds a set of services called a ``best
cover'' of $Q$ whose descriptions contain as much {\em
common information\/} with $Q$ as possible and as
little {\em extra information\/} with respect to $Q$ as
possible. We have implemented the proposed discovery
technique and used the developed prototype in the
context of the {\em Multilingual Knowledge Based
European Electronic Marketplace\/} (MKBEEM) project.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "description logics; discovery; hypergraphs; semantic
matchmaking; Web services",
}
@Article{Sattler:2005:CBQ,
author = "Kai-Uwe Sattler and Ingolf Geist and Eike Schallehn",
title = "Concept-based querying in mediator systems",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "97--111",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "One approach to overcoming heterogeneity as a part of
data integration in mediator systems is the use of
metadata in the form of a vocabulary or ontology to
represent domain knowledge explicitly. This requires
including this meta level during query formulation and
processing. In this paper, we address this problem in
the context of a mediator that uses a concept-based
integration model and an extension of the XQuery
language called CQuery. This mediator has been
developed as part of a project for integrating data
about cultural assets. We describe the language
extensions and their semantics as well as the rewriting
and evaluation steps. Furthermore, we discuss aspects
of caching and keyword-based search in support of an
efficient query formulation and processing.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data integration; mediator systems; query processing",
}
@Article{Tzitzikas:2005:MTB,
author = "Yannis Tzitzikas and Nicolas Spyratos and Panos
Constantopoulos",
title = "Mediators over taxonomy-based information sources",
journal = j-VLDB-J,
volume = "14",
number = "1",
pages = "112--136",
month = mar,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:14 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "We propose a mediator model for providing integrated
and unified access to multiple taxonomy-based sources.
Each source comprises a taxonomy and a database that
indexes objects under the terms of the taxonomy. A
mediator comprises a taxonomy and a set of relations
between the mediator's and the sources'
terms, called articulations. By combining different
modes of query evaluation at the sources and the
mediator and different types of query translation, a
flexible, efficient scheme of mediator operation is
obtained that can accommodate various application needs
and levels of answer quality. We adopt a simple
conceptual modeling approach (taxonomies and
intertaxonomy mappings) and we illustrate its
advantages in terms of ease of use, uniformity,
scalability, and efficiency. These characteristics make
this proposal appropriate for a large-scale network of
sources and mediators.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "approximate query translation; information
integration; mediators; taxonomies",
}
@Article{Gunopulos:2005:SEM,
author = "Dimitrios Gunopulos and George Kollios and J. Tsotras
and Carlotta Domeniconi",
title = "Selectivity estimators for multidimensional range
queries over real attributes",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "137--154",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Estimating the selectivity of multidimensional range
queries over real valued attributes has significant
applications in data exploration and database query
optimization. In this paper, we consider the following
problem: given a table of $d$ attributes whose domain
is the real numbers and a query that specifies a range
in each dimension, find a good approximation of the
number of records in the table that satisfy the query.
The simplest approach to tackle this problem is to
assume that the attributes are independent. More
accurate estimators try to capture the joint data
distribution of the attributes. In databases, such
estimators include the construction of multidimensional
histograms, random sampling, or the wavelet transform.
In statistics, kernel estimation techniques are being
used. Many traditional approaches assume that attribute
values come from discrete, finite domains, where
different values have high frequencies. However, for
many novel applications (as in temporal, spatial, and
multimedia databases) attribute values come from the
infinite domain of real numbers. Consequently, each
value appears very infrequently, a characteristic that
affects the behavior and effectiveness of the
estimator. Moreover, real-life data exhibit attribute
correlations that also affect the estimator. We present
a new histogram technique that is designed to
approximate the density of multidimensional datasets
with real attributes. Our technique defines buckets of
variable size and allows the buckets to overlap. The
size of the cells is based on the local density of the
data. The use of overlapping buckets allows a more
compact approximation of the data distribution. We also
show how to generalize kernel density estimators and
how to apply them to the multidimensional query
approximation problem. Finally, we compare the accuracy
of the proposed techniques with existing techniques
using real and synthetic datasets. The experimental
results show that the proposed techniques behave more
accurately in high dimensionalities than previous
approaches.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Alhajj:2005:VFC,
author = "Reda Alhajj and Faruk Polat and Cem Y{\'\i}lmaz",
title = "Views as first-class citizens in object-oriented
databases",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "155--169",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Extensibility and dynamic schema evolution are among
the attractive features that lead to the wide
acceptance of the object-oriented paradigm. Not knowing
all class hierarchy details should not prevent a user
from introducing new classes when necessary. Naive or
professional users may define new classes either by
using class definition constructs or as views. However,
improper placement of such classes leads to a flat
hierarchy with many things duplicated. To overcome this
problem, we automated the process in order to help the
user find the most appropriate position with respect to
her class in the hierarchy regardless of her knowledge
of the hierarchy. The system must be responsible for
the proper placement of new classes because only the
system has complete knowledge of the details of the
class hierarchy, especially in a dynamic environment
where changes are very frequent. In other published
work, we proved that to define a view it is enough to
have the set of objects that qualify to be in a view in
addition to having message expressions (possible paths)
that lead to desired values within those objects. Here,
we go further to map a view that is intended to be
persistent into a class. Then we investigate the proper
position of that class in the hierarchy. To achieve
this, we consider current characteristics of a new
class in order to derive its relationship with other
existing classes in the hierarchy. Another advantage of
the presented model is that views that generate new
objects are still updatable simply because we based the
creation of new objects on existing identities. In
other words, an object participates inside view objects
by its identity regardless of which particular values
from that object are of interest to the view. Values
are reachable via message expressions, not violating
encapsulation. This way, actual values are present in
only one place and can be updated.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "class hierarchy; object-oriented databases;
reusability; schema evolution; views",
}
@Article{Zhang:2005:OSM,
author = "Donghui Zhang and J. Tsotras",
title = "Optimizing spatial {Min\slash Max} aggregations",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "170--181",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Aggregate computation over a collection of spatial
objects appears in many real-life applications.
Aggregates are computed on values (weights) associated
with spatial objects, for example, the temperature or
rainfall over the area covered by the object. In this
paper we concentrate on MIN/MAX aggregations: ``given a
query rectangle, find the minimum/maximum weight among
all objects intersecting the query rectangle.''
Traditionally such queries have been performed as range
searches. Assuming that objects are indexed by a
spatial access method (SAM), the MIN/MAX is computed
while retrieving those objects intersecting the query
interval. This requires effort proportional to the
number of objects satisfying the query, which may be
large. A better approach is to maintain aggregate
information among the index nodes of the spatial access
method; then various index paths can be eliminated
during the range search. Yet another approach is to
build a specialized index that maintains the aggregate
incrementally. We propose four novel optimizations for
improving the performance of MIN/MAX queries when an
index structure (traditional or specialized) is
present. Moreover, we introduce the MR-tree, an
R-tree-like dynamic specialized index that incorporates
all four optimizations. Our experiments show that the
MR-tree offers drastic performance improvement over
previous solutions. As a byproduct of this work we
present an optimized version of the MSB-tree, an index
that has been proposed for the MIN/MAX computation over
1D interval objects.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "indexing; MIN/MAX; spatial aggregates",
}
@Article{Perich:2005:CJP,
author = "Filip Perich and Anupam Joshi and Yelena Yesha and Tim
Finin",
title = "Collaborative joins in a pervasive computing
environment",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "182--196",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "We present a collaborative query processing protocol
based on the principles of Contract Nets. The protocol
is designed for pervasive computing environments where,
in addition to operating on limited computing and
battery resources, mobile devices cannot always rely on
being able to access the wired infrastructure. Devices,
therefore, need to collaborate with each other in order
to obtain data otherwise inaccessible due to the nature
of the environment. Furthermore, by intelligently using
answers cached by peers, devices can reduce their
computation cost. We show the effectiveness of our
approach by evaluating performance of devices querying
for data while moving in a citylike environment.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "distributed join processing; mobile ad hoc networks;
peer-to-peer computing; pervasive computing
environments; query processing",
}
@Article{Josifovski:2005:QXS,
author = "Vanja Josifovski and Marcus Fontoura and Attila
Barta",
title = "Querying {XML} streams",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "197--210",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Efficient querying of XML streams will be one of the
fundamental features of next-generation information
systems. In this paper we propose the TurboXPath path
processor, which accepts a language equivalent to a
subset of the for-let-where constructs of XQuery over a
single document. TurboXPath can be extended to provide
full XQuery support or used to augment federated
database engines for efficient handling of queries over
XML data streams produced by external sources.
Internally, TurboXPath uses a tree-shaped path
expression with multiple outputs to drive the
execution. The result of a query execution is a
sequence of tuples of XML fragments matching the output
nodes. Based on a streamed execution model, TurboXPath
scales up to large documents and has limited memory
consumption for increased concurrency. Experimental
evaluation of a prototype demonstrates performance
gains compared to other state-of-the-art path
processors.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Aggarwal:2005:EEA,
author = "C. Aggarwal and S. Yu",
title = "An effective and efficient algorithm for
high-dimensional outlier detection",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "211--221",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The outlier detection problem has important
applications in the field of fraud detection, network
robustness analysis, and intrusion detection. Most such
applications are most important for high-dimensional
domains in which the data can contain hundreds of
dimensions. Many recent algorithms have been proposed
for outlier detection that use several concepts of
proximity in order to find the outliers based on their
relationship to the other points in the data. However,
in high-dimensional space, the data are sparse and
concepts using the notion of proximity fail to retain
their effectiveness. In fact, the sparsity of
high-dimensional data can be understood in a different
way so as to imply that every point is an equally good
outlier from the perspective of distance-based
definitions. Consequently, for high-dimensional data,
the notion of finding meaningful outliers becomes
substantially more complex and nonobvious. In this
paper, we discuss new techniques for outlier detection
that find the outliers by studying the behavior of
projections from the data set.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data mining; high-dimensional spaces; outlier
detection",
}
@Article{Yao:2005:HBL,
author = "D. Yao and Cyrus Shahabi and Per-{\AA}ke Larson",
title = "Hash-based labeling techniques for storage scaling",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "222--237",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Scalable storage architectures allow for the addition
or removal of storage devices to increase storage
capacity and bandwidth or retire older devices.
Assuming random placement of data objects across
multiple storage devices of a storage pool, our
optimization objective is to redistribute a minimum
number of objects after scaling the pool. In addition,
a uniform distribution, and hence a balanced load,
should be ensured after redistribution. Moreover, the
redistributed objects should be retrieved efficiently
during the normal mode of operation: in one I/O access
and with low complexity computation. To achieve this,
we propose an algorithm called random disk labeling
(RDL), based on double hashing, where storage can be
added or removed without any increase in complexity. We
compare RDL with other proposed techniques and
demonstrate its effectiveness through
experimentation.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "load balancing; random data placement; scalable
storage systems",
}
@Article{Kollios:2005:IMO,
author = "George Kollios and Dimitris Papadopoulos and Dimitrios
Gunopulos and J. Tsotras",
title = "Indexing mobile objects using dual transformations",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "238--256",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "With the recent advances in wireless networks,
embedded systems, and GPS technology, databases that
manage the location of moving objects have received
increased interest. In this paper, we present indexing
techniques for moving object databases. In particular,
we propose methods to index moving objects in order to
efficiently answer range queries about their current
and future positions. This problem appears in real-life
applications such as predicting future congestion areas
in a highway system or allocating more bandwidth for
areas where a high concentration of mobile phones is
imminent. We address the problem in external memory and
present dynamic solutions, both for the one-dimensional
and the two-dimensional cases. Our approach transforms
the problem into a dual space that is easier to index.
Important in this dynamic environment is not only query
performance but also the update processing, given the
large number of moving objects that issue updates. We
compare the dual-transformation approach with the
TPR-tree, an efficient method for indexing moving
objects that is based on time-parameterized index
nodes. An experimental evaluation shows that the
dual-transformation approach provides comparable query
performance but has much faster update processing.
Moreover, the dual method does not require establishing
a predefined query horizon.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "access methods; mobile objects; spatiotemporal
databases",
}
@Article{Jaluta:2005:CCR,
author = "Ibrahim Jaluta and Seppo Sippu and Eljas
Soisalon-Soininen",
title = "Concurrency control and recovery for balanced {B}-link
trees",
journal = j-VLDB-J,
volume = "14",
number = "2",
pages = "257--277",
month = apr,
year = "2005",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:15 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In this paper we present new concurrent and
recoverable B-link-tree algorithms. Unlike previous
algorithms, ours maintain the balance of the B-link
tree at all times, so that a logarithmic time bound for
a search or an update operation is guaranteed under
arbitrary sequences of record insertions and deletions.
A database transaction can contain any number of
operations of the form ``fetch the first (or next)
matching record'', ``insert a record'', or ``delete a
record'', where database records are identified by
their primary keys. Repeatable-read-level isolation for
transactions is guaranteed by key-range locking. The
algorithms apply the write-ahead logging (WAL) protocol
and the steal and no-force buffering policies for index
and data pages. Record inserts and deletes on leaf
pages of a B-link tree are logged using physiological
redo-undo log records. Each structure modification such
as a page split or merge is made an atomic action by
keeping the pages involved in the modification latched
for the (short) duration of the modification and the
logging of that modification; at most two B-link-tree
pages are kept $X$-latched at a time. Each structure
modification brings the B-link tree into a structurally
consistent and balanced state whenever the tree was
structurally consistent and balanced initially. Each
structure modification is logged using a single
physiological redo-only log record. Thus, a structure
modification will never be undone even if the
transaction that gave rise to it eventually aborts. In
restart recovery, the redo pass of our ARIES-based
recovery protocol will always produce a structurally
consistent and balanced B-link tree, on which the
database updates by backward-rolling transactions can
always be undone logically, when a physical
(page-oriented) undo is no longer possible.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "concurrency control; recovery; transaction;
tree-structure modifications",
}
@Article{Gaasterland:2005:SID,
author = "Terry Gaasterland and H. V. Jagadish and Louiqa
Raschid",
title = "Special issue on data management, analysis, and mining
for the life sciences",
journal = j-VLDB-J,
volume = "14",
number = "3",
pages = "279--280",
month = sep,
year = "2005",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-005-0165-5",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:16 MDT 2008",
bibsource = "http://portal.acm.org/;
http://springerlink.metapress.com/openurl.asp?genre=issue&issn=0938-1287&volume=14&issue=3;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://www.springerlink.com/openurl.asp?genre=article&issn=0938-1287&volume=14&issue=3&spage=279",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
}
@Article{Tian:2005:PMC,
author = "Yuanyuan Tian and Sandeep Tata and Richard A. Hankins
and Jignesh M. Patel",
title = "Practical methods for constructing suffix trees",
journal = j-VLDB-J,
volume = "14",
number = "3",
pages = "281--299",
month = sep,
year = "2005",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-005-0154-8",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:16 MDT 2008",
bibsource = "http://portal.acm.org/;
http://springerlink.metapress.com/openurl.asp?genre=issue&issn=0938-1287&volume=14&issue=3;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://www.springerlink.com/openurl.asp?genre=article&issn=0938-1287&volume=14&issue=3&spage=281",
abstract = "Sequence datasets are ubiquitous in modern
life-science applications, and querying sequences is a
common and critical operation in many of these
applications. The suffix tree is a versatile data
structure that can be used to evaluate a wide variety
of queries on sequence datasets, including evaluating
exact and approximate string matches, and finding
repeat patterns. However, methods for constructing
suffix trees are often very time-consuming, especially
for suffix trees that are large and do not fit in the
available main memory. Even when the suffix tree fits
in memory, it turns out that the processor cache
behavior of theoretically optimal suffix tree
construction methods is poor, resulting in poor
performance. Currently, there are a large number of
algorithms for constructing suffix trees, but the
practical tradeoffs in using these algorithms for
different scenarios are not well characterized. In this
paper, we explore suffix tree construction algorithms
over a wide spectrum of data sources and sizes. First,
we show that on modern processors, a cache-efficient
algorithm with $O(n^2)$ worst-case complexity
outperforms popular linear time algorithms like Ukkonen
and McCreight, even for in-memory construction. For
larger datasets, the disk I/O requirement quickly
becomes the bottleneck in each algorithm's performance.
To address this problem, we describe two approaches.
First, we present a buffer management strategy for the
$O(n^2)$ algorithm. The resulting new algorithm, which
we call ``Top Down Disk-based'' (TDD), scales to sizes
much larger than have been previously described in
literature. This approach far outperforms the best
known disk-based construction methods. Second, we
present a new disk-based suffix tree construction
algorithm that is based on a sort-merge paradigm, and
show that for constructing very large suffix trees with
very little resources, this algorithm is more efficient
than TDD.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "sequence matching; suffix tree construction",
}
@Article{Claypool:2005:SYD,
author = "Kajal T. Claypool and Elke A. Rundensteiner",
title = "Sync your data: update propagation for heterogeneous
protein databases",
journal = j-VLDB-J,
volume = "14",
number = "3",
pages = "300--317",
month = sep,
year = "2005",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-005-0155-7",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:16 MDT 2008",
bibsource = "http://portal.acm.org/;
http://springerlink.metapress.com/openurl.asp?genre=issue&issn=0938-1287&volume=14&issue=3;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://www.springerlink.com/openurl.asp?genre=article&issn=0938-1287&volume=14&issue=3&spage=300",
abstract = "The traditional model of bench (wet) chemistry in many
life sciences domain is today actively complemented by
computer-based discoveries utilizing the growing number
of online data sources. A typical {\em computer-based
discovery\/} scenario for many life scientists includes
the creation of local caches of pertinent information
from multiple online resources such as Swissprot
[Nucleic Acid Res. 1 (28), 45--48 (2000)], PIR [Nucleic
Acids Res. 28 (1), 41--44 (2000)], PDB [The Protein
DataBank. Wiley, New York (2003)], to enable efficient
data analysis. This local caching of data, however,
exposes their research and eventual results to the
problems of data staleness, that is, cached data may
quickly be obsolete or incorrect, dependent on the
updates that are made to the source data. This
represents a significant challenge to the scientific
community, forcing scientists to be continuously aware
of the frequent changes made to public data sources,
and more importantly aware of the potential effects on
their own derived data sets during the course of their
research. To address this significant challenge, in
this paper we present an approach for handling update
propagation between heterogeneous databases,
guaranteeing data freshness for scientists irrespective
of their choice of data source and its underlying data
model or interface. We propose a {\em middle-layer\/}
--based solution wherein first the change in the online
data source is translated to a sequence of changes in
the middle-layer; next each change in the middle-layer
is propagated through an algebraic representation of
the translation between the source and the target; and
finally the net-change is translated to a set of
changes that are then applied to the local cache. In
this paper, we present our algebraic model that
represents the mapping of the online resource to the
local cache, as well as our adaptive propagation
algorithm that can incrementally propagate both schema
and data changes from the source to the cache in a data
model independent manner. We present a case study based
on a joint ongoing project with our collaborators in
the Chemistry Department at UMass-Lowell to explicate
our approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data transformation; data translation; schema
evolution; update propagation; view maintenance",
}
@Article{Conery:2005:RBW,
author = "John S. Conery and Julian M. Catchen and Michael
Lynch",
title = "Rule-based workflow management for bioinformatics",
journal = j-VLDB-J,
volume = "14",
number = "3",
pages = "318--329",
month = sep,
year = "2005",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-005-0153-9",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:16 MDT 2008",
bibsource = "http://portal.acm.org/;
http://springerlink.metapress.com/openurl.asp?genre=issue&issn=0938-1287&volume=14&issue=3;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://www.springerlink.com/openurl.asp?genre=article&issn=0938-1287&volume=14&issue=3&spage=318",
abstract = "We describe a data-centric software architecture for
bioinformatics workflows and a rule-based workflow
enactment system that uses declarative specifications
of data dependences between steps to automatically
order the execution of those steps. A data-centric view
allows researchers to develop abstract descriptions of
workflow products and provides mechanisms for
describing workflow steps as objects. The rule-based
approach supports an iterative design methodology for
creating new workflows, where steps can be developed in
small, incremental updates, and the object orientation
allows workflow steps developed for one project to be
reused in other projects.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "bioinformatics; rule-based system; workflow",
}
@Article{Thakkar:2005:COE,
author = "Snehal Thakkar and Jos{\'e} Luis Ambite and Craig A.
Knoblock",
title = "Composing, optimizing, and executing plans for
bioinformatics web services",
journal = j-VLDB-J,
volume = "14",
number = "3",
pages = "330--353",
month = sep,
year = "2005",
CODEN = "VLDBFR",
DOI = "https://doi.org/10.1007/s00778-005-0158-4",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:16 MDT 2008",
bibsource = "http://portal.acm.org/;
http://springerlink.metapress.com/openurl.asp?genre=issue&issn=0938-1287&volume=14&issue=3;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
URL = "http://www.springerlink.com/openurl.asp?genre=article&issn=0938-1287&volume=14&issue=3&spage=330",
abstract = "The emergence of a large number of bioinformatics
datasets on the Internet has resulted in the need for
flexible and efficient approaches to integrate
information from multiple bioinformatics data sources
and services. In this paper, we present our approach to
automatically generate composition plans for web
services, optimize the composition plans, and execute
these plans efficiently. While data integration
techniques have been applied to the bioinformatics
domain, the focus has been on answering specific user
queries. In contrast, we focus on automatically
generating {\em parameterized\/} integration plans that
can be hosted as web services that respond to a range
of inputs. In addition, we present two novel techniques
that improve the execution time of the generated plans
by reducing the number of requests to the existing data
sources and by executing the generated plan more
efficiently. The first optimization technique, called
tuple-level filtering, analyzes the source/service
descriptions in order to automatically insert filtering
conditions in the composition plans that result in
fewer requests to the component web services. To ensure
that the filtering conditions can be evaluated, this
technique may include sensing operations in the
integration plan. The savings due to filtering
significantly exceed the cost of the sensing
operations. The second optimization technique consists
in mapping the integration plans into programs that can
be executed by a dataflow-style, streaming execution
engine. We use real-world bioinformatics web services
to show experimentally that (1) our automatic
composition techniques can efficiently generate
parameterized plans that integrate data from large
numbers of existing services and (2) our optimization
techniques can significantly reduce the response time
of the generated integration plans.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "bioinformatics; data integration; dataflow-style
streaming execution; query optimization; Web service
composition",
}
@Article{Vlachos:2006:IMT,
author = "Michail Vlachos and Marios Hadjieleftheriou and
Dimitrios Gunopulos and Eamonn Keogh",
title = "Indexing {Multidimensional Time-Series}",
journal = j-VLDB-J,
volume = "15",
number = "1",
pages = "1--20",
month = jan,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:17 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "While most time series data mining research has
concentrated on providing solutions for a single
distance function, in this work we motivate the need
for an index structure that can support multiple
distance measures. Our specific area of interest is the
efficient retrieval and analysis of similar
trajectories. Trajectory datasets are very common in
environmental applications, mobility experiments, and
video surveillance and are especially important for the
discovery of certain biological patterns. Our primary
similarity measure is based on the longest common
subsequence (LCSS) model that offers enhanced
robustness, particularly for noisy data, which are
encountered very often in real-world applications.
However, our index is able to accommodate other
distance measures as well, including the ubiquitous
Euclidean distance and the increasingly popular dynamic
time warping (DTW). While other researchers have
advocated one or other of these similarity measures, a
major contribution of our work is the ability to
support all these measures without the need to
restructure the index. Our framework guarantees no
false dismissals and can also be tailored to provide
much faster response time at the expense of slightly
reduced precision/recall. The experimental results
demonstrate that our index can help speed up the
computation of expensive similarity measures such as
the LCSS and the DTW.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "dynamic time warping; ensemble index; longest common
subsequence; motion capture; trajectories",
}
@Article{Zheng:2006:GPI,
author = "Baihua Zheng and Jianliang Xu and Wang-Chien Lee and
Lun Lee",
title = "Grid-partition index: a hybrid method for
nearest-neighbor queries in wireless location-based
services",
journal = j-VLDB-J,
volume = "15",
number = "1",
pages = "21--39",
month = jan,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:17 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Traditional nearest-neighbor (NN) search is based on
two basic indexing approaches: object-based indexing
and solution-based indexing. The former is constructed
based on the locations of data objects: using some
distance heuristics on object locations. The latter is
built on a precomputed solution space. Thus, NN queries
can be reduced to and processed as simple point queries
in this solution space. Both approaches exhibit some
disadvantages, especially when employed for wireless
data broadcast in mobile computing environments. In
this paper, we introduce a new index method, called the
{\em grid-partition index}, to support NN search in
both on-demand access and periodic broadcast modes of
mobile computing. The grid-partition index is
constructed based on the Voronoi diagram, i.e., the
solution space of NN queries. However, it has two
distinctive characteristics. First, it divides the
solution space into grid cells such that a query point
can be efficiently mapped into a grid cell around which
the nearest object is located. This significantly
reduces the search space. Second, the grid-partition
index stores the {\em objects\/} that are potential NNs
of any query falling within the cell. The storage of
objects, instead of the Voronoi cells, makes the
grid-partition index a hybrid of the solution-based and
object-based approaches. As a result, it achieves a
much more compact representation than the pure
solution-based approach and avoids backtracked
traversals required in the typical object-based
approach, thus realizing the advantages of both
approaches. We develop an incremental construction
algorithm to address the issue of object update. In
addition, we present a cost model to approximate the
search cost of different grid partitioning schemes. The
performances of the grid-partition index and existing
indexes are evaluated using both synthetic and real
data. The results show that, overall, the
grid-partition index significantly outperforms
object-based indexes and solution-based indexes.
Furthermore, we extend the grid-partition index to
support continuous-nearest-neighbor search. Both
algorithms and experimental results are presented.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "continuous-nearest-neighbor search; index structure;
location-dependent data; nearest-neighbor search;
wireless broadcast",
}
@Article{Tamir:2006:CGM,
author = "Raz Tamir and Yehuda Singer",
title = "On a confidence gain measure for association rule
discovery and scoring",
journal = j-VLDB-J,
volume = "15",
number = "1",
pages = "40--52",
month = jan,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:17 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "This article presents a new interestingness measure
for association rules called confidence gain (CG).
Focus is given to extraction of human associations
rather than associations between market products. There
are two main differences between the two (human and
market associations). The first difference is the
strong asymmetry of human associations (e.g., the
association ``shampoo''--``hair'' is much stronger than
``hair''--``shampoo''), where in market products
asymmetry is less intuitive and less evident. The
second is the background knowledge humans employ when
presented with a stimulus (input phrase).CG calculates
the local confidence of a given term compared to its
average confidence throughout a given database. CG is
found to outperform several association measures since
it captures both the asymmetric notion of an
association (as in the confidence measure) while adding
the comparison to an expected confidence (as in the
lift measure). The use of average confidence introduces
the ``background knowledge'' notion into the CG
measure. Various experiments have shown that CG and
local confidence gain (a low-complexity version of CG)
successfully generate association rules when compared
to human free associations. The experiments include a
large-scale ``free sssociation Turing test'' where
human free associations were compared to associations
generated by the CG and other association measures.
Rules discovered by CG were found to be significantly
better than those discovered by other measures. CG can
be used for many purposes, such as personalization,
sense disambiguation, query expansion, and improving
classification performance of small item sets within
large databases. Although CG was found to be useful for
Internet data retrieval, results can be easily used
over any type of database.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "association generation; association rule validation
methods; confidence gain; Web data management; Web
mining",
}
@Article{Bremer:2006:IDD,
author = "Jan-Marco Bremer and Michael Gertz",
title = "Integrating document and data retrieval based on
{XML}",
journal = j-VLDB-J,
volume = "15",
number = "1",
pages = "53--83",
month = jan,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:17 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "For querying structured and semistructured data, data
retrieval and document retrieval are two valuable and
complementary techniques that have not yet been fully
integrated. In this paper, we introduce integrated
information retrieval (IIR), an XML-based retrieval
approach that closes this gap. We introduce the syntax
and semantics of an extension of the XQuery language
called XQuery/IR. The extended language realizes IIR
and thereby allows users to formulate new kinds of
queries by nesting ranked document retrieval and
precise data retrieval queries. Furthermore, we detail
index structures and efficient query processing
approaches for implementing XQuery/IR. Based on a new
identification scheme for nodes in node-labeled tree
structures, the extended index structures require only
a fraction of the space of comparable index structures
that only support data retrieval.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data retrieval; document retrieval; index structures;
integrated information retrievals; structural join;
XML",
}
@Article{Ogras:2006:OSD,
author = "Y. Ogras and Hakan Ferhatosmanoglu",
title = "Online summarization of dynamic time series data",
journal = j-VLDB-J,
volume = "15",
number = "1",
pages = "84--98",
month = jan,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:17 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Managing large-scale time series databases has
attracted significant attention in the database
community recently. Related fundamental problems such
as dimensionality reduction, transformation, pattern
mining, and similarity search have been studied
extensively. Although the time series data are dynamic
by nature, as in data streams, current solutions to
these fundamental problems have been mostly for the
static time series databases. In this paper, we first
propose a framework to online summary generation for
large-scale and dynamic time series data, such as data
streams. Then, we propose online transform-based
summarization techniques over data streams that can be
updated in constant time and space. We present both the
exact and approximate versions of the proposed
techniques and provide error bounds for the approximate
case. One of our main contributions in this paper is
the extensive performance analysis. Our experiments
carefully evaluate the quality of the online summaries
for point, range, and $k ???? nn$ queries using
real-life dynamic data sets of substantial size.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data streams; dimensionality reduction; time-series
data; transformation-based summarization",
}
@Article{Goh:2006:DBM,
author = "Leng Goh and Yanfeng Shu and Zhiyong Huang and Chin
Ooi",
title = "Dynamic buffer management with extensible replacement
policies",
journal = j-VLDB-J,
volume = "15",
number = "2",
pages = "99--120",
month = jun,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:18 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "The objective of extensible DBMSs is to ease the
construction of specialized DBMSs for nontraditional
applications. Although much work has been done in
providing various levels of extensibility (e.g.,
extensibility of data types and operators, query
language extensibility, and query optimizer
extensibility), there has been very limited research in
providing extensibility at the buffer management level.
Supporting extensibility at the buffer management level
is important as it can contribute significantly to
overall system performance. This paper addresses the
problem of supporting extensibility of buffer
replacement policies. The main contribution is the
proposal of a framework for modeling buffer replacement
policies. This work is novel in two aspects. First, by
providing a uniform and generic specification of buffer
replacement policies, the proposed framework unifies
existing work in this area. Second, our work introduces
a new level of extensibility. None of the existing
extensible DBMSs, to our knowledge, provides
extensibility at the buffer management level. The
proposed framework provides a basis for the
construction of an extensible buffer manager as part of
a 100\% Java-based storage manager. We conducted an
extensive performance study to investigate the
performance of the proposed framework. The experimental
results demonstrate that the proposed framework is
indeed feasible for existing DBMSs and improves system
performance significantly without costing significant
overhead.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "buffer management; extensible DBMS; replacement
strategies",
}
@Article{Arasu:2006:CCQ,
author = "Arvind Arasu and Shivnath Babu and Jennifer Widom",
title = "The {CQL} continuous query language: semantic
foundations and query execution",
journal = j-VLDB-J,
volume = "15",
number = "2",
pages = "121--142",
month = jun,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:18 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "{\em CQL}, a {\em continuous query language}, is
supported by the STREAM prototype data stream
management system (DSMS) at Stanford. CQL is an
expressive SQL-based declarative language for
registering continuous queries against streams and
stored relations. We begin by presenting an abstract
semantics that relies only on ``black-box'' mappings
among streams and relations. From these mappings we
define a precise and general interpretation for
continuous queries. CQL is an instantiation of our
abstract semantics using SQL to map from relations to
relations, window specifications derived from SQL-99 to
map from streams to relations, and three new operators
to map from relations to streams. Most of the CQL
language is operational in the STREAM system. We
present the structure of CQL's query execution plans as
well as details of the most important components:
operators, interoperator queues, synopses, and sharing
of components among multiple operators and queries.
Examples throughout the paper are drawn from the {\em
Linear Road\/} benchmark recently proposed for DSMSs.
We also curate a public repository of data stream
applications that includes a wide variety of queries
expressed in CQL. The relative ease of capturing these
applications in CQL is one indicator that the language
contains an appropriate set of constructs for data
stream processing.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "continuous queries; data streams; query language;
query processing",
}
@Article{Hadjieleftheriou:2006:ISA,
author = "Marios Hadjieleftheriou and George Kollios and J.
Tsotras and Dimitrios Gunopulos",
title = "Indexing spatiotemporal archives",
journal = j-VLDB-J,
volume = "15",
number = "2",
pages = "143--164",
month = jun,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:18 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Spatiotemporal objects --- that is, objects that
evolve over time --- appear in many applications. Due
to the nature of such applications, storing the
evolution of objects through time in order to answer
historical queries (queries that refer to past states
of the evolution) requires a very large specialized
database, what is termed in this article a {\em
spatiotemporal archive}. Efficient processing of
historical queries on spatiotemporal archives requires
equally sophisticated indexing schemes. Typical
spatiotemporal indexing techniques represent the
objects using minimum bounding regions (MBR) extended
with a temporal dimension, which are then indexed using
traditional multidimensional index structures. However,
rough MBR approximations introduce excessive overlap
between index nodes, which deteriorates query
performance. This article introduces a robust indexing
scheme for answering spatiotemporal queries more
efficiently. A number of algorithms and heuristics are
elaborated that can be used to preprocess a
spatiotemporal archive in order to produce {\em finer
object approximations}, which, in combination with {\em
a multiversion index structure}, will greatly improve
query performance in comparison to the straightforward
approaches. The proposed techniques introduce a query
efficiency vs. space tradeoff that can help tune a
structure according to available resources. Empirical
observations for estimating the necessary amount of
additional storage space required for improving query
performance by a given factor are also provided.
Moreover, heuristics for applying the proposed ideas in
an online setting are discussed. Finally, a thorough
experimental evaluation is conducted to show the merits
of the proposed techniques.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "indexing; moving objects; spatiotemporal databases;
trajectories",
}
@Article{Guting:2006:MQM,
author = "Hartmut G{\"u}ting and Teixeira de Almeida and Zhiming
Ding",
title = "Modeling and querying moving objects in networks",
journal = j-VLDB-J,
volume = "15",
number = "2",
pages = "165--190",
month = jun,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:18 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "Moving objects databases have become an important
research issue in recent years. For modeling and
querying moving objects, there exists a comprehensive
framework of abstract data types to describe objects
moving freely in the 2D plane, providing data types
such as {\em moving point\/} or {\em moving region}.
However, in many applications people or vehicles move
along transportation networks. It makes a lot of sense
to model the network explicitly and to describe
movements relative to the network rather than
unconstrained space, because then it is much easier to
formulate in queries relationships between moving
objects and the network. Moreover, such models can be
better supported in indexing and query processing. In
this paper, we extend the ADT approach by modeling
networks explicitly and providing data types for static
and moving network positions and regions. In a highway
network, example entities corresponding to these data
types are motels, construction areas, cars, and traffic
jams. The network model is not too simplistic; it
allows one to distinguish simple roads and divided
highways and to describe the possible traversals of
junctions precisely. The new types and operations are
integrated seamlessly into the ADT framework to achieve
a relatively simple, consistent and powerful overall
model and query language for constrained and
unconstrained movement.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "ADT; data type; moving object; network;
spatio-temporal",
}
@Article{Chirkova:1999:AQU,
author = "Rada Chirkova and Chen Li and Jia Li",
title = "Answering queries using materialized views with
minimum size",
journal = j-VLDB-J,
volume = "15",
number = "3",
pages = "191--210",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:19 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "In this paper, we study the following problem. Given a
database and a set of queries, we want to find a set of
views that can compute the answers to the queries, such
that the amount of space, in bytes, required to store
the viewset is minimum on the given database. (We also
handle problem instances where the input has a {\em
set\/} of database instances, as described by an oracle
that returns the sizes of view relations for given view
definitions.) This problem is important for
applications such as distributed databases, data
warehousing, and data integration. We explore the
decidability and complexity of the problem for
workloads of conjunctive queries. We show that results
differ significantly depending on whether the workload
queries have self-joins. Further, for queries without
self-joins we describe a very compact search space of
views, which contains all views in at least one optimal
viewset. We present techniques for finding a
minimum-size viewset for a single query without
self-joins by using the shape of the query and its
constraints, and validate the approach by extensive
experiments.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data warehouses; distributed systems; minimum-size
viewsets; views",
remark = "Check month: April or May??",
}
@Article{Cao:1999:STD,
author = "Hu Cao and Ouri Wolfson and Goce Trajcevski",
title = "Spatio-temporal data reduction with deterministic
error bounds",
journal = j-VLDB-J,
volume = "15",
number = "3",
pages = "211--228",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:19 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "A common way of storing spatio-temporal information
about mobile devices is in the form of a 3D (2D
geography + time) trajectory. We argue that when
cellular phones and Personal Digital Assistants become
location-aware, the size of the spatio-temporal
information generated may prohibit efficient
processing. We propose to adopt a technique studied in
computer graphics, namely line-simplification, as an
approximation technique to solve this problem. Line
simplification will reduce the size of the
trajectories. Line simplification uses a distance
function in producing the trajectory approximation. We
postulate the desiderata for such a distance-function:
it should be sound, namely the error of the answers to
spatio-temporal queries must be bounded. We analyze
several distance functions, and prove that some are
sound in this sense for some types of queries, while
others are not. A distance function that is sound for
all common spatio-temporal query types is introduced
and analyzed. Then we propose an aging mechanism which
gradually shrinks the size of the trajectories as time
progresses. We also propose to adopt existing
linguistic constructs to manage the uncertainty
introduced by the trajectory approximation. Finally, we
analyze experimentally the effectiveness of
line-simplification in reducing the size of a
trajectories database.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data reduction; line simplification; moving objects
database; uncertainty",
remark = "Check month: April or May??",
}
@Article{Benetis:1999:NRN,
author = "Rimantas Benetis and S. Jensen and Gytis
Kar{\c{c}}iauskas and Simonas {\ocirc{S}}altenis",
title = "Nearest and reverse nearest neighbor queries for
moving objects",
journal = j-VLDB-J,
volume = "15",
number = "3",
pages = "229--249",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:19 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "With the continued proliferation of wireless
communications and advances in positioning
technologies, algorithms for efficiently answering
queries about large populations of moving objects are
gaining interest. This paper proposes algorithms for
$k$ nearest and reverse $k$ nearest neighbor queries on
the current and anticipated future positions of points
moving continuously in the plane. The former type of
query returns $k$ objects nearest to a query object for
each time point during a time interval, while the
latter returns the objects that have a specified query
object as one of their $k$ closest neighbors, again for
each time point during a time interval. In addition,
algorithms for so-called persistent and continuous
variants of these queries are provided. The algorithms
are based on the indexing of object positions
represented as linear functions of time. The results of
empirical performance experiments are reported.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "continuous queries; incremental update; location-based
services; mobile objects; neighbor queries; persistent
queries",
remark = "Check month: April or May??",
}
@Article{Pelleg:1999:DTS,
author = "Dan Pelleg and Andrew Moore",
title = "Dependency trees in sub-linear time and bounded
memory",
journal = j-VLDB-J,
volume = "15",
number = "3",
pages = "250--262",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:19 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "We focus on the problem of efficient learning of
dependency trees. Once grown, they can be used as a
special case of a Bayesian network, for PDF
approximation, and for many other uses. Given the data,
a well-known algorithm can fit an optimal tree in time
that is quadratic in the number of attributes and
linear in the number of records. We show how to modify
it to exploit partial knowledge about edge weights.
Experimental results show running time that is
near-constant in the number of records, without
significant loss in accuracy of the generated trees.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "data mining; dependency trees; fast algorithms;
probably approximately correct learning",
remark = "Check month: April or May??",
}
@Article{Che:1999:QOX,
author = "Dunren Che and Karl Aberer and Tamer {\"O}zsu",
title = "Query optimization in {XML} structured-document
databases",
journal = j-VLDB-J,
volume = "15",
number = "3",
pages = "263--289",
month = apr,
year = "1999",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:19 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
abstract = "While the information published in the form of
XML-compliant documents keeps fast mounting up,
efficient and effective query processing and
optimization for XML have now become more important
than ever. This article reports our recent advances in
XML structured-document query optimization. In this
article, we elaborate on a novel approach and the
techniques developed for XML query optimization. Our
approach performs heuristic-based algebraic
transformations on XPath queries, represented as PAT
algebraic expressions, to achieve query optimization.
This article first presents a comprehensive set of
general equivalences with regard to XML documents and
XML queries. Based on these equivalences, we developed
a large set of deterministic algebraic transformation
rules for XML query optimization. Our approach is
unique, in that it performs exclusively deterministic
transformations on queries for fast optimization. The
deterministic nature of the proposed approach
straightforwardly renders high optimization efficiency
and simplicity in implementation. Our approach is a
logical-level one, which is independent of any
particular storage model. Therefore, the optimizers
developed based on our approach can be easily adapted
to a broad range of XML data/information servers to
achieve fast query optimization. Experimental study
confirms the validity and effectiveness of the proposed
approach.",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
keywords = "deterministic query optimization; query
transformation; XML database; XML query optimization;
XML query processing",
remark = "Check month: April or May??",
}
@Article{Ferrari:2006:GES,
author = "Elena Ferrari and Bhavani Thuraisingham",
title = "Guest editorial: special issue on privacy preserving
data management",
journal = j-VLDB-J,
volume = "15",
number = "4",
pages = "291--292",
month = nov,
year = "2006",
CODEN = "VLDBFR",
ISSN = "1066-8888 (print), 0949-877X (electronic)",
ISSN-L = "1066-8888",
bibdate = "Mon Jun 23 10:51:20 MDT 2008",
bibsource = "http://portal.acm.org/;
https://www.math.utah.edu/pub/tex/bib/vldbj.bib",
acknowledgement = ack-nhfb,
fjournal = "VLDB Journal: Very Large Data Bases",
journal-URL = "http://portal.acm.org/toc.cfm?id=J869",
remark = "Check month: April or November??",
}
@Article{Mukherjee:2006:PPT,
author = "Shibnath Mukherjee and Zhiyuan Chen and Aryya
Gangopadhyay",
title = "A privacy-preserving technique for {Euclidean}
distance-based mining algorithms using
{Fourier}-related transforms",
journal =