/* Copyright (c) 1995-2000, The Hypersonic SQL Group. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the Hypersonic SQL Group nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE HYPERSONIC SQL GROUP, * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This software consists of voluntary contributions made by many individuals * on behalf of the Hypersonic SQL Group. * * * For work added by the HSQL Development Group: * * Copyright (c) 2001-2008, The HSQL Development Group * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the HSQL Development Group nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL HSQL DEVELOPMENT GROUP, HSQLDB.ORG, * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package org.hsqldb; import java.io.IOException; import org.hsqldb.HsqlNameManager.HsqlName; import org.hsqldb.index.RowIterator; import org.hsqldb.lib.ArrayUtil; import org.hsqldb.lib.HashMappedList; import org.hsqldb.lib.HashSet; import org.hsqldb.lib.HsqlArrayList; import org.hsqldb.lib.Iterator; import org.hsqldb.lib.StringUtil; import org.hsqldb.persist.CachedObject; import org.hsqldb.persist.DataFileCache; import org.hsqldb.persist.PersistentStore; import org.hsqldb.rowio.RowInputBinary; import org.hsqldb.rowio.RowInputInterface; import org.hsqldb.store.ValuePool; import j2me.math.Number; // fredt@users 20020130 - patch 491987 by jimbag@users - made optional // fredt@users 20020405 - patch 1.7.0 by fredt - quoted identifiers // for sql standard quoted identifiers for column and table names and aliases // applied to different places // fredt@users 20020225 - patch 1.7.0 - restructuring // some methods moved from Database.java, some rewritten // changes to several methods // fredt@users 20020225 - patch 1.7.0 - ON DELETE CASCADE // fredt@users 20020225 - patch 1.7.0 - named constraints // boucherb@users 20020225 - patch 1.7.0 - multi-column primary keys // fredt@users 20020221 - patch 513005 by sqlbob@users (RMP) // tony_lai@users 20020820 - patch 595099 - user defined PK name // tony_lai@users 20020820 - patch 595172 - drop constraint fix // kloska@users 20021030 - patch 1.7.2 - ON UPDATE CASCADE | SET NULL | SET DEFAULT // kloska@users 20021112 - patch 1.7.2 - ON DELETE SET NULL | SET DEFAULT // fredt@users 20021210 - patch 1.7.2 - better ADD / DROP INDEX for non-CACHED tables // fredt@users 20030901 - patch 1.7.2 - allow multiple nulls for UNIQUE columns // fredt@users 20030901 - patch 1.7.2 - reworked IDENTITY support // achnettest@users 20040130 - patch 878288 - bug fix for new indexes in memory tables by Arne Christensen // boucherb@users 20040327 - doc 1.7.2 - javadoc updates // boucherb@users 200404xx - patch 1.7.2 - proper uri for getCatalogName // 
// fredt@users 20050000 - 1.8.0 updates in several areas
// fredt@users 20050220 - patch 1.8.0 enforcement of DECIMAL precision/scale

/**
 * Holds the data structures and methods for creation of a database table.
 *
 * Extensively rewritten and extended in successive versions of HSQLDB.
 *
 * @author Thomas Mueller (Hypersonic SQL Group)
 * @version 1.8.0
 * @since Hypersonic SQL
 */
public class Table extends BaseTable {

    // types of table
    public static final int SYSTEM_TABLE    = 0;
    public static final int SYSTEM_SUBQUERY = 1;
    public static final int TEMP_TABLE      = 2;
    public static final int MEMORY_TABLE    = 3;
    public static final int CACHED_TABLE    = 4;
    public static final int TEMP_TEXT_TABLE = 5;
    public static final int TEXT_TABLE      = 6;
    public static final int VIEW            = 7;

    // boucherb@users - for future implementation of SQL standard INFORMATION_SCHEMA
    static final int SYSTEM_VIEW = 8;

    // main properties
    // boucherb@users - access changed in support of metadata 1.7.2
    public HashMappedList columnList;             // columns in table
    private int[]         primaryKeyCols;         // column numbers for primary key
    private int[]         primaryKeyTypes;        // types for primary key
    private int[]         primaryKeyColsSequence; // {0,1,2,...}
    int[]                 bestRowIdentifierCols;  // column set for best index
    boolean               bestRowIdentifierStrict;    // true if it has no nullable column
    int[]                 bestIndexForColumn;     // index of the 'best' index for each column
    Index                 bestIndex;              // the best index overall - null if there is no user-defined index
    int                   identityColumn;         // -1 means no such row
    NumberSequence        identitySequence;       // next value of identity column
    NumberSequence        rowIdSequence;          // next value of optional rowid

    // -----------------------------------------------------------------------
    Constraint[]         constraintList;          // constraints for the table
    HsqlArrayList[]      triggerLists;            // array of trigger lists
    private int[]        colTypes;                // fredt - types of columns
    private int[]        colSizes;                // fredt - copy of SIZE values for columns
    private int[]        colScales;               // fredt - copy of SCALE values for columns
    private boolean[]    colNullable;             // fredt - modified copy of isNullable() values
    private Expression[] colDefaults;             // fredt - expressions of DEFAULT values
    private int[]        defaultColumnMap;        // fredt - holding 0,1,2,3,...
    private boolean      hasDefaultValues;        // fredt - shortcut for above
    boolean              sqlEnforceSize;          // inherited from the database

    // properties for subclasses
    protected int           columnCount;          // inclusive the hidden primary key
    public Database         database;
    protected DataFileCache cache;
    protected HsqlName      tableName;            // SQL name
    private int             tableType;
    protected boolean       isReadOnly;
    protected boolean       isTemp;
    protected boolean       isCached;
    protected boolean       isText;
    protected boolean       isMemory;
    private boolean         isView;
    protected boolean       isLogged;
    protected int           indexType;            // fredt - type of index used
    protected boolean       onCommitPreserve;     // for temp tables

    //
    PersistentStore rowStore;
    Index[]         indexList;                    // vIndex(0) is the primary key index

    /**
     * Constructor. Sets the flags and storage strategy implied by the
     * requested table type; a CACHED table silently degrades to MEMORY
     * when the database is not file-based.
     *
     * @param db the owning database
     * @param name the HsqlName of the table
     * @param type one of the table type constants declared above
     * @exception HsqlException if a text table type is requested for a
     *   non-file-based database
     */
    Table(Database db, HsqlName name, int type) throws HsqlException {

        database         = db;
        sqlEnforceSize   = db.sqlEnforceStrictSize;
        identitySequence = new NumberSequence(null, 0, 1, Types.BIGINT);
        rowIdSequence    = new NumberSequence(null, 0, 1, Types.BIGINT);

        switch (type) {

            case SYSTEM_SUBQUERY :
                isTemp   = true;
                isMemory = true;
                break;

            case SYSTEM_TABLE :
                isMemory = true;
                break;

            case CACHED_TABLE :
                if (DatabaseURL.isFileBasedDatabaseType(db.getType())) {
                    cache     = db.logger.getCache();
                    isCached  = true;
                    isLogged  = !database.isFilesReadOnly();
                    indexType = Index.DISK_INDEX;
                    rowStore  = new RowStore();
                    break;
                }

                // not file-based: degrade to MEMORY and fall through
                type = MEMORY_TABLE;
            case MEMORY_TABLE :
                isMemory = true;
                isLogged = !database.isFilesReadOnly();
                break;

            case TEMP_TABLE :
                isMemory = true;
                isTemp   = true;
                break;

            case TEMP_TEXT_TABLE :
                if (!DatabaseURL.isFileBasedDatabaseType(db.getType())) {
                    throw Trace.error(Trace.DATABASE_IS_MEMORY_ONLY);
                }

                isTemp     = true;
                isText     = true;
                isReadOnly = true;
                indexType  = Index.POINTER_INDEX;
                rowStore   = new RowStore();
                break;

            case TEXT_TABLE :
                if (!DatabaseURL.isFileBasedDatabaseType(db.getType())) {
                    throw Trace.error(Trace.DATABASE_IS_MEMORY_ONLY);
                }

                isText    = true;
                indexType = Index.POINTER_INDEX;
                rowStore  = new RowStore();
                break;

            case VIEW :
            case SYSTEM_VIEW :
                isView = true;
                break;
        }

        // type may have changed above for CACHED tables
        tableType       = type;
        tableName       = name;
        primaryKeyCols  = null;
        primaryKeyTypes = null;
        identityColumn  = -1;
        columnList      = new HashMappedList();
        indexList       = new Index[0];
        constraintList  = new Constraint[0];
        triggerLists    = new HsqlArrayList[TriggerDef.NUM_TRIGS];

        // ----------------------------------------------------------------------------
        // akede@users - 1.7.2 patch Files readonly
        // Changing the mode of the table if necessary
        if (db.isFilesReadOnly() && isFileBased()) {
            setIsReadOnly(true);
        }

        // ----------------------------------------------------------------------------
    }

    /**
     * Name equality; the session argument is currently unused (see the
     * commented-out per-session ownership check below).
     */
    boolean equals(Session session, String name) {

/*
        if (isTemp && (session != null && session.getId() != ownerSessionId)) {
            return false;
        }
*/
        return (tableName.name.equals(name));
    }

    boolean equals(String name) {
        return (tableName.name.equals(name));
    }

    boolean equals(HsqlName name) {
        return (tableName.equals(name));
    }

    public final boolean isText() {
        return isText;
    }

    public final boolean isTemp() {
        return isTemp;
    }

    public final boolean isReadOnly() {
        return isDataReadOnly();
    }

    final boolean isView() {
        return isView;
    }

    final int getIndexType() {
        return indexType;
    }

    public final int getTableType() {
        return tableType;
    }

    public boolean isDataReadOnly() {
        return isReadOnly;
    }

    /**
     * Sets the isReadOnly flag, and invalidates the database's system
     * tables as needed.
     */
    protected void setIsReadOnly(boolean newReadOnly) {
        isReadOnly = newReadOnly;

        database.setMetaDirty(true);
    }

    /**
     * Used by INSERT, DELETE, UPDATE operations.
     *
     * @throws HsqlException when the table data is read-only
     */
    void checkDataReadOnly() throws HsqlException {

        if (isDataReadOnly()) {
            throw Trace.error(Trace.DATA_IS_READONLY);
        }
    }

    // ----------------------------------------------------------------------------
    // akede@users - 1.7.2 patch Files readonly
    void setDataReadOnly(boolean value) throws HsqlException {

        // Changing the Read-Only mode for the table is only allowed if
        // the database can realize it.
        if (!value && database.isFilesReadOnly() && isFileBased()) {
            throw Trace.error(Trace.DATA_IS_READONLY);
        }

        isReadOnly = value;
    }

    /**
     * Text or Cached Tables are normally file based.
     */
    boolean isFileBased() {
        return isCached || isText;
    }

    /**
     * For text tables; this base implementation always throws.
     */
    protected void setDataSource(Session s, String source, boolean isDesc,
                                 boolean newFile) throws HsqlException {
        throw (Trace.error(Trace.TABLE_NOT_FOUND));
    }

    /**
     * For text tables.
     */
    protected String getDataSource() {
        return null;
    }

    /**
     * For text tables.
     */
    protected boolean isDescDataSource() {
        return false;
    }

    /**
     * For text tables; this base implementation always throws.
     */
    public void setHeader(String header) throws HsqlException {
        throw Trace.error(Trace.TEXT_TABLE_HEADER);
    }

    /**
     * For text tables.
     */
    public String getHeader() {
        return null;
    }

    /**
     * Determines whether the table is actually connected to the underlying
     * data source.
     *
     * <p>This method is available for text tables only.</p>
     *
     * @see #setDataSource
     * @see #disconnect
     * @see #isConnected
     */
    public boolean isConnected() {
        return true;
    }

    /**
     * Connects the table to the underlying data source.
     *
     * <p>This method is available for text tables only.</p>
     *
     * @param session denotes the current session. Might be
     *   <code>null</code>.
     *
     * @see #setDataSource
     * @see #disconnect
     * @see #isConnected
     */
    public void connect(Session session) throws HsqlException {
        throw Trace.error(Trace.CANNOT_CONNECT_TABLE);
    }
/**
 * Disconnects the table from the underlying data source.
 *
 * <p>This method is available for text tables only.</p>
 *
 * @param session denotes the current session. Might be
 *   <code>null</code>.
 *
 * @see #setDataSource
 * @see #connect
 * @see #isConnected
 */
public void disconnect(Session session) throws HsqlException {
    throw Trace.error(Trace.CANNOT_CONNECT_TABLE);
}
/**
 * Appends a constraint to this table's constraint list.
 *
 * @param c the constraint to add
 */
void addConstraint(Constraint c) {

    Constraint[] extended =
        (Constraint[]) ArrayUtil.toAdjustedArray(constraintList, c,
            constraintList.length, 1);

    constraintList = extended;
}
/**
 * Returns the array of Constraint objects for this table.
 */
Constraint[] getConstraints() {
    return constraintList;
}
/**
 * Returns the primary key constraint, or null when the table has no
 * primary key. When a primary key exists it is always the first entry
 * of the constraint list.
 */
Constraint getPrimaryConstraint() {

    if (primaryKeyCols.length == 0) {
        return null;
    }

    return constraintList[0];
}
/** @todo fredt - this can be improved to ignore order of columns in
 * multi-column indexes */

/**
 * Returns the index supporting a constraint with exactly the given
 * column signature. Only the primary key and UNIQUE constraints are
 * considered; returns null when no match exists.
 *
 * @param col column indexes to match
 */
Index getUniqueConstraintIndexForColumns(int[] col) {

    Index pkIndex = getPrimaryIndex();

    if (ArrayUtil.areEqual(pkIndex.getColumns(), col, col.length, true)) {
        return pkIndex;
    }

    for (int idx = 0; idx < constraintList.length; idx++) {
        Constraint constraint = constraintList[idx];

        if (constraint.getType() != Constraint.UNIQUE) {
            continue;
        }

        if (ArrayUtil.areEqual(constraint.getMainColumns(), col,
                               col.length, true)) {
            return constraint.getMainIndex();
        }
    }

    return null;
}
/**
 * Returns the first foreign key constraint equivalent to the given
 * (main table, main columns, ref columns) combination, or null when
 * none exists.
 *
 * @param tablemain the main (referenced) table
 * @param colmain column indexes in the main table
 * @param colref column indexes in this (referencing) table
 */
Constraint getConstraintForColumns(Table tablemain, int[] colmain,
                                   int[] colref) {

    for (int idx = 0; idx < constraintList.length; idx++) {
        Constraint candidate = constraintList[idx];

        if (candidate.isEquivalent(tablemain, colmain, this, colref)) {
            return candidate;
        }
    }

    return null;
}
/**
 * Returns the first UNIQUE constraint equivalent to the given column
 * set, or null when none exists.
 *
 * @param cols column indexes of the candidate unique constraint
 */
Constraint getUniqueConstraintForColumns(int[] cols) {

    for (int idx = 0; idx < constraintList.length; idx++) {
        Constraint candidate = constraintList[idx];

        if (candidate.isEquivalent(cols, Constraint.UNIQUE)) {
            return candidate;
        }
    }

    return null;
}
/**
 * Returns the UNIQUE or PRIMARY KEY constraint backed by the given
 * index, or null when the index backs neither.
 *
 * @param index the index to look up (compared by identity)
 */
Constraint getUniqueOrPKConstraintForIndex(Index index) {

    for (int idx = 0; idx < constraintList.length; idx++) {
        Constraint candidate  = constraintList[idx];
        boolean    uniqueOrPk = candidate.getType() == Constraint.UNIQUE
                                || candidate.getType()
                                   == Constraint.PRIMARY_KEY;

        if (uniqueOrPk && candidate.getMainIndex() == index) {
            return candidate;
        }
    }

    return null;
}
/**
 * Returns the position of the next constraint of the given type,
 * starting the search at position <code>from</code>; -1 when there is
 * no further constraint of that type.
 *
 * @param from first list position to examine
 * @param type one of the Constraint type codes
 */
int getNextConstraintIndex(int from, int type) {

    for (int idx = from; idx < constraintList.length; idx++) {
        if (constraintList[idx].getType() == type) {
            return idx;
        }
    }

    return -1;
}
// fredt@users 20020220 - patch 475199 - duplicate column

/**
 * Performs the table level checks and adds a column to the table at the
 * DDL level. Only used at table creation, not at alter column.
 *
 * @param column the column definition to append
 * @throws HsqlException if the name duplicates an existing column, an
 *   identity column has a wrong type or already exists, or the primary
 *   key has already been created
 */
void addColumn(Column column) throws HsqlException {

    String colName = column.columnName.name;

    if (findColumn(colName) >= 0) {
        throw Trace.error(Trace.COLUMN_ALREADY_EXISTS, colName);
    }

    if (column.isIdentity()) {

        // identity columns must be INTEGER or BIGINT, and unique per table
        boolean validType = column.getType() == Types.INTEGER
                            || column.getType() == Types.BIGINT;

        Trace.check(validType, Trace.WRONG_DATA_TYPE, colName);
        Trace.check(identityColumn == -1, Trace.SECOND_PRIMARY_KEY,
                    colName);

        identityColumn = columnCount;
    }

    // columns may not be added once the primary key exists
    if (primaryKeyCols != null) {
        Trace.doAssert(false, "Table.addColumn");
    }

    columnList.add(colName, column);

    columnCount++;
}
/**
 * Adds one column per entry of a ResultMetaData, using the result
 * labels as column names.
 *
 * @param metadata source of column names, types, sizes and scales
 * @param count number of columns to add
 */
void addColumns(Result.ResultMetaData metadata,
                int count) throws HsqlException {

    for (int i = 0; i < count; i++) {
        HsqlName colName = database.nameManager.newHsqlName(
            metadata.colLabels[i], metadata.isLabelQuoted[i]);
        Column column = new Column(colName, true, metadata.colTypes[i],
                                   metadata.colSizes[i],
                                   metadata.colScales[i], false, null);

        addColumn(column);
    }
}
/**
 * Adds a set of columns based on a compiled Select, using the select
 * expression aliases as column names.
 *
 * @param select the compiled select supplying aliases and types
 */
void addColumns(Select select) throws HsqlException {

    int colCount = select.iResultLen;

    for (int i = 0; i < colCount; i++) {
        Expression e       = select.exprColumns[i];
        HsqlName   colName = database.nameManager.newHsqlName(
            e.getAlias(), e.isAliasQuoted());
        Column column = new Column(colName, true, e.getDataType(),
                                   e.getColumnSize(), e.getColumnScale(),
                                   false, null);

        addColumn(column);
    }
}
/**
 * Returns the HsqlName object of the table.
 */
public HsqlName getName() {
    return tableName;
}
/**
 * Returns an id for the table, derived from the HsqlName hash code.
 */
public int getId() {
    return tableName.hashCode();
}
/**
 * Changes table name. Used by 'alter table rename to'.
 * Essential to use the existing HsqlName as this is referenced by
 * instances of Constraint etc.
 *
 * @param session the current session
 * @param newname the new table name
 * @param isquoted true when the new name is a quoted identifier
 */
void rename(Session session, String newname,
            boolean isquoted) throws HsqlException {

    String oldname = tableName.name;

    tableName.rename(newname, isquoted);

    // CHECK constraint expressions may embed the old table name
    renameTableInCheckConstraints(session, oldname, newname);
}
/**
 * Returns total column count, including hidden ones.
 */
int getInternalColumnCount() {
    return columnCount;
}
/**
 * Returns a basic duplicate of the table without its data structures
 * (columns, indexes and constraints are not copied).
 */
protected Table duplicate() throws HsqlException {

    Table copy = new Table(database, tableName, tableType);

    copy.onCommitPreserve = onCommitPreserve;

    return copy;
}
/**
 * Match two column arrays for length and type of columns.
 *
 * @param col column array from this Table
 * @param other the other Table object
 * @param othercol column array from the other Table
 * @throws HsqlException if there is a mismatch
 */
void checkColumnsMatch(int[] col, Table other,
                       int[] othercol) throws HsqlException {

    if (col.length != othercol.length) {
        throw Trace.error(Trace.COLUMN_COUNT_DOES_NOT_MATCH);
    }

    for (int i = 0; i < col.length; i++) {

        // integrity check - should not throw in normal operation
        boolean outOfRange = col[i] >= columnCount
                             || othercol[i] >= other.columnCount;

        if (outOfRange) {
            throw Trace.error(Trace.COLUMN_COUNT_DOES_NOT_MATCH);
        }

        int thisType  = getColumn(col[i]).getType();
        int otherType = other.getColumn(othercol[i]).getType();

        if (thisType != otherType) {
            throw Trace.error(Trace.COLUMN_TYPE_MISMATCH);
        }
    }
}
// fredt@users 20020405 - patch 1.7.0 by fredt - DROP and CREATE INDEX bug

/**
 * Checks or performs cascading actions for a row being deleted from
 * <code>table</code>. Constraints that need removing are removed outside
 * this method.
 *
 * Support added for SET NULL and SET DEFAULT by kloska@users involves
 * switching to checkCascadeUpdate(,,,,) when these rules are encountered
 * in the constraint. (fredt@users)
 *
 * @param session the current session
 * @param table table to delete from
 * @param tableUpdateLists map of table -> list of row updates, filled in
 *   for ON DELETE SET NULL / SET DEFAULT actions
 * @param row row to delete
 * @param delete true to perform deletes, false to only check constraints
 * @param path set of constraints already visited - guards against
 *   infinite recursion on circular FK references
 * @throws HsqlException on integrity constraint violation
 */
static void checkCascadeDelete(Session session, Table table,
                               HashMappedList tableUpdateLists, Row row,
                               boolean delete,
                               HashSet path) throws HsqlException {

    for (int i = 0, size = table.constraintList.length; i < size; i++) {
        Constraint c = table.constraintList[i];

        // only MAIN-side constraints with a referencing table matter here
        if (c.getType() != Constraint.MAIN || c.getRef() == null) {
            continue;
        }

        RowIterator refiterator = c.findFkRef(session, row.getData(),
                                              delete);

        if (!refiterator.hasNext()) {
            continue;
        }

        try {
            if (c.core.deleteAction == Constraint.NO_ACTION) {
                if (c.core.mainTable == c.core.refTable) {
                    Row refrow = refiterator.next();

                    // fredt - it's the same row
                    // this supports deleting a single row
                    // in future we can iterate over and check against
                    // the full delete row list to enable multi-row
                    // with self-referencing FK's deletes
                    if (row.equals(refrow)) {
                        continue;
                    }
                }

                throw Trace.error(Trace.INTEGRITY_CONSTRAINT_VIOLATION,
                                  Trace.Constraint_violation,
                                  new Object[] {
                    c.core.fkName.name, c.core.refTable.getName().name
                });
            }

            Table reftable = c.getRef();

            // shortcut when deltable has no imported constraint
            boolean hasref =
                reftable.getNextConstraintIndex(0, Constraint.MAIN) != -1;

            // if (reftable == this) we don't need to go further and can return ??
            if (delete == false && hasref == false) {
                continue;
            }

            Index    refindex  = c.getRefIndex();
            int[]    m_columns = c.getMainColumns();
            int[]    r_columns = c.getRefColumns();
            Object[] mdata     = row.getData();
            boolean  isUpdate  = c.getDeleteAction() == Constraint.SET_NULL
                                 || c.getDeleteAction()
                                    == Constraint.SET_DEFAULT;

            // -- list for records to be inserted if this is
            // -- a 'ON DELETE SET [NULL|DEFAULT]' constraint
            HashMappedList rowSet = null;

            if (isUpdate) {
                rowSet = (HashMappedList) tableUpdateLists.get(reftable);

                if (rowSet == null) {
                    rowSet = new HashMappedList();

                    tableUpdateLists.add(reftable, rowSet);
                }
            }

            // walk the index for all the nodes that reference delnode
            for (;;) {
                Row refrow = refiterator.next();

                // stop at end of iterator, already-deleted rows, or the
                // end of the matching key range
                if (refrow == null || refrow.isCascadeDeleted()
                        || refindex.compareRowNonUnique(
                            session, mdata, m_columns,
                            refrow.getData()) != 0) {
                    break;
                }

                // -- if the constraint is a 'SET [DEFAULT|NULL]' constraint we have to keep
                // -- a new record to be inserted after deleting the current. We also have to
                // -- switch over to the 'checkCascadeUpdate' method below this level
                if (isUpdate) {
                    Object[] rnd = reftable.getEmptyRowData();

                    System.arraycopy(refrow.getData(), 0, rnd, 0,
                                     rnd.length);

                    if (c.getDeleteAction() == Constraint.SET_NULL) {
                        for (int j = 0; j < r_columns.length; j++) {
                            rnd[r_columns[j]] = null;
                        }
                    } else {
                        for (int j = 0; j < r_columns.length; j++) {
                            Column col = reftable.getColumn(r_columns[j]);

                            rnd[r_columns[j]] =
                                col.getDefaultValue(session);
                        }
                    }

                    if (hasref && path.add(c)) {

                        // fredt - avoid infinite recursion on circular references
                        // these can be rings of two or more mutually dependent tables
                        // so only one visit per constraint is allowed
                        checkCascadeUpdate(session, reftable, null,
                                           refrow, rnd, r_columns, null,
                                           path);
                        path.remove(c);
                    }

                    if (delete) {

                        // foreign key referencing own table - do not update the row to be deleted
                        if (reftable != table || !refrow.equals(row)) {
                            mergeUpdate(rowSet, refrow, rnd, r_columns);
                        }
                    }
                } else if (hasref) {
                    if (reftable != table) {
                        if (path.add(c)) {
                            checkCascadeDelete(session, reftable,
                                               tableUpdateLists, refrow,
                                               delete, path);
                            path.remove(c);
                        }
                    } else {

                        // fredt - we avoid infinite recursion on the fk's referencing the same table
                        // but chained rows can result in very deep recursion and StackOverflowError
                        if (refrow != row) {
                            checkCascadeDelete(session, reftable,
                                               tableUpdateLists, refrow,
                                               delete, path);
                        }
                    }
                }

                if (delete && !isUpdate && !refrow.isCascadeDeleted()) {
                    reftable.deleteNoRefCheck(session, refrow);
                }
            }
        } finally {
            refiterator.release();
        }
    }
}
/**
* Check or perform an update cascade operation on a single row.
* Check or cascade an update (delete/insert) operation.
* The method takes a pair of rows (new data,old data) and checks
* if Constraints permit the update operation.
* A boolean arguement determines if the operation should
* realy take place or if we just have to check for constraint violation.
* fredt - cyclic conditions are now avoided by checking for second visit
* to each constraint. The set of list of updates for all tables is passed
* and filled in recursive calls.
*
* @param session current database session
* @param table
* @param tableUpdateLists lists of updates
* @param orow old row data to be deleted.
* @param nrow new row data to be inserted.
* @param cols indices of the columns actually changed.
* @param ref This should be initialized to null when the
* method is called from the 'outside'. During recursion this will be the
* current table (i.e. this) to indicate from where we came.
* Foreign keys to this table do not have to be checked since they have
* triggered the update and are valid by definition.
*
* @short Check or perform and update cascade operation on a single row.
*
*
*/
static void checkCascadeUpdate(Session session, Table table,
HashMappedList tableUpdateLists, Row orow,
Object[] nrow, int[] cols, Table ref,
HashSet path) throws HsqlException {
// -- We iterate through all constraints associated with this table
// --
for (int i = 0, size = table.constraintList.length; i < size; i++) {
Constraint c = table.constraintList[i];
if (c.getType() == Constraint.FOREIGN_KEY && c.getRef() != null) {
// -- (1) If it is a foreign key constraint we have to check if the
// -- main table still holds a record which allows the new values
// -- to be set in the updated columns. This test however will be
// -- skipped if the reference table is the main table since changes
// -- in the reference table triggered the update and therefor
// -- the referential integrity is guaranteed to be valid.
// --
if (ref == null || c.getMain() != ref) {
// -- common indexes of the changed columns and the main/ref constraint
if (ArrayUtil.countCommonElements(cols, c.getRefColumns())
== 0) {
// -- Table::checkCascadeUpdate -- NO common cols; reiterating
continue;
}
c.hasMainRef(session, nrow);
}
} else if (c.getType() == Constraint.MAIN && c.getRef() != null) {
// -- (2) If it happens to be a main constraint we check if the slave
// -- table holds any records refering to the old contents. If so,
// -- the constraint has to support an 'on update' action or we
// -- throw an exception (all via a call to Constraint.findFkRef).
// --
// -- If there are no common columns between the reference constraint
// -- and the changed columns, we reiterate.
int[] common = ArrayUtil.commonElements(cols,
c.getMainColumns());
if (common == null) {
// -- NO common cols between; reiterating
continue;
}
int[] m_columns = c.getMainColumns();
int[] r_columns = c.getRefColumns();
// fredt - find out if the FK columns have actually changed
boolean nochange = true;
for (int j = 0; j < m_columns.length; j++) {
if (!orow.getData()[m_columns[j]].equals(
nrow[m_columns[j]])) {
nochange = false;
break;
}
}
if (nochange) {
continue;
}
// there must be no record in the 'slave' table
// sebastian@scienion -- dependent on forDelete | forUpdate
RowIterator refiterator = c.findFkRef(session, orow.getData(),
false);
if (refiterator.hasNext()) {
if (c.core.updateAction == Constraint.NO_ACTION) {
throw Trace.error(Trace.INTEGRITY_CONSTRAINT_VIOLATION,
Trace.Constraint_violation,
new Object[] {
c.core.fkName.name, c.core.refTable.getName().name
});
}
} else {
// no referencing row found
continue;
}
Table reftable = c.getRef();
// -- unused shortcut when update table has no imported constraint
boolean hasref =
reftable.getNextConstraintIndex(0, Constraint.MAIN) != -1;
Index refindex = c.getRefIndex();
// -- walk the index for all the nodes that reference update node
HashMappedList rowSet =
(HashMappedList) tableUpdateLists.get(reftable);
if (rowSet == null) {
rowSet = new HashMappedList();
tableUpdateLists.add(reftable, rowSet);
}
for (Row refrow = refiterator.next(); ;
refrow = refiterator.next()) {
if (refrow == null
|| refindex.compareRowNonUnique(
session, orow.getData(), m_columns,
refrow.getData()) != 0) {
break;
}
Object[] rnd = reftable.getEmptyRowData();
System.arraycopy(refrow.getData(), 0, rnd, 0, rnd.length);
// -- Depending on the type constraint we are dealing with we have to
// -- fill up the forign key of the current record with different values
// -- And handle the insertion procedure differently.
if (c.getUpdateAction() == Constraint.SET_NULL) {
// -- set null; we do not have to check referential integrity any further
// -- since we are setting null
values
for (int j = 0; j < r_columns.length; j++) {
rnd[r_columns[j]] = null;
}
} else if (c.getUpdateAction() == Constraint.SET_DEFAULT) {
// -- set default; we check referential integrity with ref==null; since we manipulated
// -- the values and referential integrity is no longer guaranteed to be valid
for (int j = 0; j < r_columns.length; j++) {
Column col = reftable.getColumn(r_columns[j]);
rnd[r_columns[j]] = col.getDefaultValue(session);
}
if (path.add(c)) {
checkCascadeUpdate(session, reftable,
tableUpdateLists, refrow, rnd,
r_columns, null, path);
path.remove(c);
}
} else {
// -- cascade; standard recursive call. We inherit values from the foreign key
// -- table therefor we set ref==this.
for (int j = 0; j < m_columns.length; j++) {
rnd[r_columns[j]] = nrow[m_columns[j]];
}
if (path.add(c)) {
checkCascadeUpdate(session, reftable,
tableUpdateLists, refrow, rnd,
common, table, path);
path.remove(c);
}
}
mergeUpdate(rowSet, refrow, rnd, r_columns);
}
}
}
}
/**
 * Merges a triggered change with a previously registered change for the
 * same row, or registers the new data when the row is not yet listed.
 *
 * @param rowSet map of Row -> pending new data
 * @param row the row being changed
 * @param newData the new column values
 * @param cols indices of the changed columns
 */
static void mergeUpdate(HashMappedList rowSet, Row row, Object[] newData,
                        int[] cols) {

    Object[] existing = (Object[]) rowSet.get(row);

    if (existing == null) {
        rowSet.add(row, newData);

        return;
    }

    // row already pending - overlay the changed columns only
    for (int i = 0; i < cols.length; i++) {
        existing[cols[i]] = newData[cols[i]];
    }
}
/**
 * Merge the full triggered change with the updated row, or add to list.
 * Return false if changes conflict.
 *
 * @param session the current session
 * @param rowSet map of Row -> pending new data
 * @param cols indices of the updated columns
 * @param colTypes column types used for row comparison
 * @param row the row being updated
 * @param newData the new data for the row
 * @return false when the new change conflicts with a pending one
 */
static boolean mergeKeepUpdate(Session session, HashMappedList rowSet,
                               int[] cols, int[] colTypes, Row row,
                               Object[] newData) throws HsqlException {

    Object[] data = (Object[]) rowSet.get(row);

    if (data != null) {

        // conflict when newData differs both from the current row data
        // and from the previously registered pending data
        if (Index.compareRows(
                session, row.getData(), newData, cols,
                colTypes) != 0 && Index.compareRows(
                    session, newData, data, cols, colTypes) != 0) {
            return false;
        }

        // keep the previously registered values for the updated columns
        for (int j = 0; j < cols.length; j++) {
            newData[cols[j]] = data[cols[j]];
        }

        rowSet.put(row, newData);
    } else {
        rowSet.add(row, newData);
    }

    return true;
}
/**
 * Clears the per-table row sets held in the given update-list map.
 *
 * @param tableUpdateList map of table -> HashMappedList of row updates
 */
static void clearUpdateLists(HashMappedList tableUpdateList) {

    int count = tableUpdateList.size();

    for (int i = 0; i < count; i++) {
        ((HashMappedList) tableUpdateList.get(i)).clear();
    }
}
/**
 * Highest level multiple row delete method. Corresponds to an SQL
 * DELETE. Runs a check-only cascade pass, verifies transactions, fires
 * statement triggers, runs the cascading pass, deletes the rows, then
 * applies any SET NULL / SET DEFAULT updates collected on the way.
 *
 * NOTE(review): constraintPath and tableUpdateList are fields declared
 * elsewhere in this class; they appear to cache the containers between
 * calls to avoid reallocation - confirm against the full source.
 *
 * @param session the current session
 * @param deleteList list of Row objects to delete
 * @return number of rows deleted
 * @throws HsqlException on constraint violation
 */
int delete(Session session,
           HsqlArrayList deleteList) throws HsqlException {

    // reuse the cached containers when available
    HashSet path = constraintPath == null ? new HashSet()
                                          : constraintPath;

    constraintPath = null;

    HashMappedList tUpdateList = tableUpdateList == null
                                 ? new HashMappedList()
                                 : tableUpdateList;

    tableUpdateList = null;

    // first pass - check integrity only (delete == false)
    if (database.isReferentialIntegrity()) {
        for (int i = 0; i < deleteList.size(); i++) {
            Row row = (Row) deleteList.get(i);

            path.clear();
            checkCascadeDelete(session, this, tUpdateList, row, false,
                               path);
        }
    }

    // check transactions
    database.txManager.checkDelete(session, deleteList);

    for (int i = 0; i < tUpdateList.size(); i++) {
        Table          table      = (Table) tUpdateList.getKey(i);
        HashMappedList updateList = (HashMappedList) tUpdateList.get(i);

        database.txManager.checkDelete(session, updateList);
    }

    // perform delete
    fireAll(session, Trigger.DELETE_BEFORE);

    // second pass - perform the cascading actions (delete == true)
    if (database.isReferentialIntegrity()) {
        for (int i = 0; i < deleteList.size(); i++) {
            Row row = (Row) deleteList.get(i);

            path.clear();
            checkCascadeDelete(session, this, tUpdateList, row, true,
                               path);
        }
    }

    for (int i = 0; i < deleteList.size(); i++) {
        Row row = (Row) deleteList.get(i);

        if (!row.isCascadeDeleted()) {
            deleteNoRefCheck(session, row);
        }
    }

    // apply SET NULL / SET DEFAULT updates collected while cascading
    for (int i = 0; i < tUpdateList.size(); i++) {
        Table          table      = (Table) tUpdateList.getKey(i);
        HashMappedList updateList = (HashMappedList) tUpdateList.get(i);

        table.updateRowSet(session, updateList, null, false);
        updateList.clear();
    }

    fireAll(session, Trigger.DELETE_AFTER);
    path.clear();

    // return the reusable containers to their fields
    constraintPath  = path;
    tableUpdateList = tUpdateList;

    return deleteList.size();
}
/**
 * Mid level row delete method. Fires row-level triggers but performs no
 * integrity constraint checks.
 *
 * @param session the current session
 * @param row the row to delete
 */
private void deleteNoRefCheck(Session session,
                              Row row) throws HsqlException {

    Object[] rowData = row.getData();

    fireAll(session, Trigger.DELETE_BEFORE_ROW, rowData, null);
    deleteNoCheck(session, row, true);

    // fire the delete after statement trigger
    fireAll(session, Trigger.DELETE_AFTER_ROW, rowData, null);
}
/**
 * Low level row delete method. Removes the row from the indexes and
 * from the Cache.
 *
 * @param session the session performing the delete (may be null)
 * @param row the row to remove
 * @param log whether to write the delete to the database log
 */
private void deleteNoCheck(Session session, Row row,
                           boolean log) throws HsqlException {

    // a row already removed by a cascade must not be deleted twice
    if (row.isCascadeDeleted()) {
        return;
    }

    // keep the original data for logging before switching row versions
    Object[] data = row.getData();

    row = row.getUpdatedRow();

    // unlink the row's node from every index, last index first
    for (int i = indexList.length - 1; i >= 0; i--) {
        Node node = row.getNode(i);

        indexList[i].delete(session, node);
    }

    row.delete();

    if (session != null) {
        session.addDeleteAction(this, row);
    }

    if (log && isLogged) {
        database.logger.writeDeleteStatement(session, this, data);
    }
}
/**
 * For log statements. Locates the row whose full column data equals
 * the given data and removes it from all indexes, without firing
 * triggers or performing referential checks.
 *
 * @param session the session replaying the log
 * @param data full column data identifying the row to delete
 */
public void deleteNoCheckFromLog(Session session,
                                 Object[] data) throws HsqlException {

    Row row = null;

    if (hasPrimaryKey()) {
        // fast path: locate the row directly via the primary key
        RowIterator it = getPrimaryIndex().findFirstRow(session, data,
            primaryKeyColsSequence);

        row = it.next();
    } else if (bestIndex == null) {
        // no usable index: scan every row comparing full column data
        RowIterator it = getPrimaryIndex().firstRow(session);

        while (true) {
            row = it.next();

            if (row == null) {
                break;
            }

            if (Index.compareRows(
                    session, row.getData(), data, defaultColumnMap,
                    colTypes) == 0) {
                break;
            }
        }
    } else {
        // scan only the index range that matches the indexed columns
        RowIterator it = bestIndex.findFirstRow(session, data);

        while (true) {
            row = it.next();

            if (row == null) {
                break;
            }

            Object[] rowdata = row.getData();

            // reached end of range
            if (bestIndex.compareRowNonUnique(
                    session, data, bestIndex.getColumns(), rowdata) != 0) {
                row = null;

                break;
            }

            // full comparison confirms the exact row within the range
            if (Index.compareRows(
                    session, rowdata, data, defaultColumnMap,
                    colTypes) == 0) {
                break;
            }
        }
    }

    // silently ignore rows that are no longer present
    if (row == null) {
        return;
    }

    // not necessary for log deletes
    database.txManager.checkDelete(session, row);

    // unlink the row's node from every index, last index first
    for (int i = indexList.length - 1; i >= 0; i--) {
        Node node = row.getNode(i);

        indexList[i].delete(session, node);
    }

    row.delete();

    if (session != null) {
        session.addDeleteAction(this, row);
    }
}
/**
 * Low level row delete method. Removes the row from the indexes and
 * from the Cache. Used by rollback.
 *
 * @param session the session rolling back
 * @param row the row to remove; refetched from the first index so the
 *   current in-memory version is the one unlinked
 * @param log whether to write the delete to the database log
 */
void deleteNoCheckRollback(Session session, Row row,
                           boolean log) throws HsqlException {

    row = indexList[0].findRow(session, row);

    // unlink the row's node from every index, last index first
    for (int i = indexList.length - 1; i >= 0; i--) {
        Node node = row.getNode(i);

        indexList[i].delete(session, node);
    }

    row.delete();
    removeRowFromStore(row);

    if (log && isLogged) {
        database.logger.writeDeleteStatement(session, this, row.getData());
    }
}
/**
 * Highest level multiple row update method. Corresponds to an SQL
 * UPDATE. To DEAL with unique constraints we need to perform all
 * deletes at once before the inserts. If there is a UNIQUE constraint
 * violation limited only to the duration of updating multiple rows,
 * we don't want to abort the operation. Example:
 * UPDATE MYTABLE SET UNIQUECOL = UNIQUECOL + 1
 * After performing each cascade update, delete the main row.
 * After all cascade ops and deletes have been performed, insert new
 * rows.
 *
 * The following clauses from SQL Standard section 11.8 are enforced
 * 9) Let ISS be the innermost SQL-statement being executed.
 * 10) If evaluation of these General Rules during the execution of ISS
 * would cause an update of some site to a value that is distinct from the
 * value to which that site was previously updated during the execution of
 * ISS, then an exception condition is raised: triggered data change
 * violation.
 * 11) If evaluation of these General Rules during the execution of ISS
 * would cause deletion of a row containing a site that is identified for
 * replacement in that row, then an exception condition is raised:
 * triggered data change violation.
 *
 * (fredt)
 *
 * @param session the session performing the update
 * @param updateList map of old Row to new row data
 * @param cols the updated column indexes
 * @return the number of rows updated
 * @throws HsqlException on constraint violation or transaction conflict
 */
int update(Session session, HashMappedList updateList,
           int[] cols) throws HsqlException {

    // reuse the cached helper collections when available
    HashSet path = constraintPath == null ? new HashSet()
                                          : constraintPath;

    constraintPath = null;

    HashMappedList tUpdateList = tableUpdateList == null
                                 ? new HashMappedList()
                                 : tableUpdateList;

    tableUpdateList = null;

    // set identity column where null and check columns
    for (int i = 0; i < updateList.size(); i++) {
        Object[] data = (Object[]) updateList.get(i);

        // this means the identity column can be set to null to force
        // creation of a new identity value
        setIdentityColumn(session, data);
        enforceFieldValueLimits(data, cols);
        enforceNullConstraints(data);
    }

    // perform check/cascade operations
    if (database.isReferentialIntegrity()) {
        for (int i = 0; i < updateList.size(); i++) {
            Object[] data = (Object[]) updateList.get(i);
            Row      row  = (Row) updateList.getKey(i);

            checkCascadeUpdate(session, this, tUpdateList, row, data,
                               cols, null, path);
        }
    }

    fireAll(session, Trigger.UPDATE_BEFORE);

    // merge any triggered change to this table with the update list
    HashMappedList triggeredList = (HashMappedList) tUpdateList.get(this);

    if (triggeredList != null) {
        for (int i = 0; i < triggeredList.size(); i++) {
            Row      row  = (Row) triggeredList.getKey(i);
            Object[] data = (Object[]) triggeredList.get(i);

            mergeKeepUpdate(session, updateList, cols, colTypes, row,
                            data);
        }

        triggeredList.clear();
    }

    // check transactions
    for (int i = 0; i < tUpdateList.size(); i++) {
        HashMappedList updateListT = (HashMappedList) tUpdateList.get(i);

        database.txManager.checkDelete(session, updateListT);
    }

    database.txManager.checkDelete(session, updateList);

    // update lists - main list last
    for (int i = 0; i < tUpdateList.size(); i++) {
        Table          table       = (Table) tUpdateList.getKey(i);
        HashMappedList updateListT = (HashMappedList) tUpdateList.get(i);

        table.updateRowSet(session, updateListT, null, false);
        updateListT.clear();
    }

    updateRowSet(session, updateList, cols, true);
    fireAll(session, Trigger.UPDATE_AFTER);
    path.clear();

    // return the helper collections to the cache for reuse
    constraintPath  = path;
    tableUpdateList = tUpdateList;

    clearUpdateLists(tableUpdateList);

    return updateList.size();
}
/**
 * Applies an update by deleting the old rows (in reverse order) and
 * then inserting the new versions, firing row-level UPDATE triggers.
 *
 * @param session the session performing the update
 * @param rowSet map of old Row to new row data
 * @param cols the updated column indexes, or null
 * @param nodelete when true, encountering a row already deleted by a
 *   cascade raises a triggered-data-change error; when false such rows
 *   are silently dropped from the set
 */
void updateRowSet(Session session, HashMappedList rowSet, int[] cols,
                  boolean nodelete) throws HsqlException {

    // delete phase - iterate in reverse so removals don't shift entries
    for (int i = rowSet.size() - 1; i >= 0; i--) {
        Row      row  = (Row) rowSet.getKey(i);
        Object[] data = (Object[]) rowSet.get(i);

        if (row.isCascadeDeleted()) {
            if (nodelete) {
                throw Trace.error(Trace.TRIGGERED_DATA_CHANGE);
            } else {
                rowSet.remove(i);

                continue;
            }
        }

        // verify CHECK constraints against the new data before deleting
        for (int j = 0; j < constraintList.length; j++) {
            Constraint c = constraintList[j];

            if (c.getType() == Constraint.CHECK) {
                c.checkCheckConstraint(session, data);
            }
        }

        deleteNoCheck(session, row, true);
    }

    // insert phase - fire BEFORE/AFTER row triggers around each insert
    for (int i = 0; i < rowSet.size(); i++) {
        Row      row  = (Row) rowSet.getKey(i);
        Object[] data = (Object[]) rowSet.get(i);

        if (triggerLists[Trigger.UPDATE_BEFORE_ROW] != null) {
            fireAll(session, Trigger.UPDATE_BEFORE_ROW, row.getData(),
                    data);

            // triggers may have modified the data - re-validate it
            checkRowDataUpdate(session, data, cols);
        }

        insertNoCheck(session, data);

        if (triggerLists[Trigger.UPDATE_AFTER_ROW] != null) {
            fireAll(session, Trigger.UPDATE_AFTER_ROW, row.getData(),
                    data);
            checkRowDataUpdate(session, data, cols);
        }
    }
}
/**
 * Validates a row of data for insertion: field limits, NOT NULL, and
 * (when referential integrity is enabled) every table constraint.
 */
void checkRowDataInsert(Session session,
                        Object[] data) throws HsqlException {

    enforceFieldValueLimits(data, null);
    enforceNullConstraints(data);

    if (database.isReferentialIntegrity()) {
        Constraint[] constraints = constraintList;

        for (int index = 0; index < constraints.length; index++) {
            constraints[index].checkInsert(session, data);
        }
    }
}
/**
 * Validates a row of data for update: field limits, NOT NULL, and all
 * CHECK constraints (other constraint types are not evaluated here).
 */
void checkRowDataUpdate(Session session, Object[] data,
                        int[] cols) throws HsqlException {

    enforceFieldValueLimits(data, cols);
    enforceNullConstraints(data);

    Constraint[] constraints = constraintList;

    for (int index = 0; index < constraints.length; index++) {
        Constraint constraint = constraints[index];

        if (constraint.getType() == Constraint.CHECK) {
            constraint.checkCheckConstraint(session, data);
        }
    }
}
/**
 * True if table is CACHED or TEXT
 *
 * @return true if this table's rows are backed by the cache
 */
public boolean isCached() {
    return isCached;
}
/**
 * Returns true if table is CACHED
 *
 * @return the same flag as {@link #isCached()}
 */
boolean isIndexCached() {
    return isCached;
}
/**
 * Returns the position of the named index within the index list, or
 * -1 when no index with that name exists.
 */
int getIndexIndex(String indexName) {

    Index[] indexes = indexList;

    for (int pos = 0; pos < indexes.length; pos++) {
        if (indexName.equals(indexes[pos].getName().name)) {
            return pos;
        }
    }

    return -1;    // no such index
}
/**
 * Returns the Index object of the given name or null if not found.
 */
Index getIndex(String indexName) {

    int pos = getIndexIndex(indexName);

    if (pos == -1) {
        return null;
    }

    return indexList[pos];
}
/**
 * Return the position of the constraint within the list, or -1 when
 * no constraint with that name exists.
 */
int getConstraintIndex(String constraintName) {

    Constraint[] constraints = constraintList;

    for (int pos = 0; pos < constraints.length; pos++) {
        if (constraints[pos].getName().name.equals(constraintName)) {
            return pos;
        }
    }

    return -1;
}
/**
 * Returns the named constraint, or null if not found.
 */
Constraint getConstraint(String constraintName) {

    int i = getConstraintIndex(constraintName);

    // constraintList is Constraint[] - no cast needed
    return (i < 0) ? null
                   : constraintList[i];
}
/**
 * remove a named constraint
 *
 * NOTE(review): if no constraint with this name exists, index is -1 and
 * is passed to toAdjustedArray unchecked - presumably callers only pass
 * names of existing constraints; verify before relying on it.
 */
void removeConstraint(String name) {

    int index = getConstraintIndex(name);

    constraintList =
        (Constraint[]) ArrayUtil.toAdjustedArray(constraintList, null,
            index, -1);
}
/**
 * Returns the Column object at the given index
 */
Column getColumn(int i) {
    return (Column) columnList.get(i);
}
/**
 * Renames a column, updating the column list key, the column's own
 * name object and any CHECK constraints that reference the old name.
 *
 * @param column the column to rename
 * @param newName the new column name
 * @param isquoted whether the new name is a quoted (case-sensitive) name
 */
void renameColumn(Column column, String newName,
                  boolean isquoted) throws HsqlException {

    String oldname = column.columnName.name;
    int    i       = getColumnNr(oldname);

    columnList.setKey(i, newName);
    column.columnName.rename(newName, isquoted);
    renameColumnInCheckConstraints(oldname, newName, isquoted);
}
/**
 * Returns an array of int values indicating the SQL type of the columns
 */
public int[] getColumnTypes() {
    return colTypes;
}
/**
 * Returns the Index object at the given index
 */
public Index getIndex(int i) {
    return indexList[i];
}
/**
 * Returns the internal index array (not a copy).
 */
public Index[] getIndexes() {
    return indexList;
}
/**
 * Used by CACHED tables to fetch a Row from the Cache, resulting in the
 * Row being read from disk if it is not in the Cache.
 *
 * TEXT tables pass the memory resident Node parameter so that the Row
 * and its index Nodes can be relinked.
 *
 * @param pos the row's position in the store
 * @param primarynode primary index node to relink (TEXT tables only)
 * @return the row, or null for tables that are neither TEXT nor CACHED
 */
CachedRow getRow(int pos, Node primarynode) throws HsqlException {

    if (isText) {
        CachedDataRow row = (CachedDataRow) rowStore.get(pos);

        row.nPrimaryNode = primarynode;

        return row;
    } else if (isCached) {
        return (CachedRow) rowStore.get(pos);
    }

    return null;
}
/**
 * As above, only for CACHED tables
 */
CachedRow getRow(int pos) {
    return (CachedRow) rowStore.get(pos);
}
/**
 * As above, only for CACHED tables
 *
 * NOTE(review): the long id is narrowed to int - presumably row ids for
 * cached tables always fit in an int; verify against id generation.
 */
CachedRow getRow(long id) {
    return (CachedRow) rowStore.get((int) id);
}
/**
 * called in autocommit mode or by transaction manager when a delete is
 * committed
 *
 * NOTE(review): the condition parses as isCached || (isText && cache != null)
 * due to operator precedence, so for CACHED tables the cache null-check is
 * not applied - confirm this is intentional.
 */
void removeRowFromStore(Row row) throws HsqlException {

    if (isCached || isText && cache != null) {
        rowStore.remove(row.getPos());
    }
}
/**
 * Releases the row's slot in the store without persisting a removal.
 *
 * NOTE(review): as with removeRowFromStore, the condition parses as
 * isCached || (isText && cache != null) - confirm this is intentional.
 */
void releaseRowFromStore(Row row) throws HsqlException {

    if (isCached || isText && cache != null) {
        rowStore.release(row.getPos());
    }
}
/**
 * Persists a committed row; only TEXT tables with a live cache write
 * anything here.
 */
void commitRowToStore(Row row) {

    if (isText && cache != null) {
        rowStore.commit(row);
    }
}
/**
 * Inserts a row into every index. If any insert fails (e.g. a unique
 * index violation), the insertions already made are rolled back and the
 * row is removed from the store before the exception is rethrown.
 */
void indexRow(Session session, Row row) throws HsqlException {

    int i = 0;

    try {
        for (; i < indexList.length; i++) {
            indexList[i].insert(session, row, i);
        }
    } catch (HsqlException e) {

        // unique index violation - rollback insert
        // i is the failing index; undo indexes 0 .. i-1 in reverse
        for (--i; i >= 0; i--) {
            Node n = row.getNode(i);

            indexList[i].delete(session, n);
        }

        row.delete();
        removeRowFromStore(row);

        throw e;
    }
}
/**
 * Removes all rows from every index and, for non-temporary tables,
 * resets the identity and rowid sequences.
 */
void clearAllRows(Session session) {

    Index[] indexes = indexList;

    for (int index = 0; index < indexes.length; index++) {
        indexes[index].clearAll(session);
    }

    if (!isTemp) {
        identitySequence.reset();
        rowIdSequence.reset();
    }
}
/** @todo -- release the rows (currently a no-op) */
void drop() throws HsqlException {}
/**
 * Returns true when rows may be modified: the table data and database
 * are writable, and file-backed tables are not in files-read-only mode.
 */
boolean isWritable() {

    if (isDataReadOnly() || database.databaseReadOnly) {
        return false;
    }

    return !(database.isFilesReadOnly() && (isCached || isText));
}
/**
 * Returns the catalog name or null, depending on a database property.
 */
String getCatalogName() {

    // PRE: database is never null
    if (database.getProperties().isPropertyTrue("hsqldb.catalogs")) {
        return database.getURI();
    }

    return null;
}
/**
 * Returns the schema name.
 */
public String getSchemaName() {
    return tableName.schema.name;
}
/**
 * Returns the number of rows, as counted by the primary index.
 */
public int getRowCount(Session session) throws HsqlException {
    return getPrimaryIndex().size(session);
}
/**
 * Necessary when over Integer.MAX_VALUE Row objects have been generated
 * for a memory table. Reassigns a fresh row id to every row; CACHED
 * tables are skipped because their positions come from the store.
 */
public void resetRowId(Session session) throws HsqlException {

    if (isCached) {
        return;
    }

    rowIdSequence = new NumberSequence(null, 0, 1, Types.BIGINT);

    // removed a stray empty statement (";;") after this call
    RowIterator it = getPrimaryIndex().firstRow(session);

    while (it.hasNext()) {
        Row row = it.next();
        int pos = (int) rowIdSequence.getValue();

        row.setPos(pos);
    }
}
/**
 * Factory method instantiates a Row based on table type.
 *
 * Memory tables get a plain Row with the next rowid; other tables get
 * a CachedRow which is added to the row store.
 *
 * @param o the row data
 * @throws HsqlException wrapping any IOException from the store
 */
Row newRow(Object[] o) throws HsqlException {

    Row row;

    try {
        if (isMemory) {
            row = new Row(this, o);

            int pos = (int) rowIdSequence.getValue();

            row.setPos(pos);
        } else {
            row = CachedRow.newCachedRow(this, o);

            rowStore.add(row);
        }
    } catch (IOException e) {
        // surface store I/O failures as HsqlException, preserving cause
        throw new HsqlException(
            e, Trace.getMessage(Trace.GENERAL_IO_ERROR),
            Trace.GENERAL_IO_ERROR);
    }

    return row;
}
/**
 * Recreates a previously deleted row at its original position, reusing
 * the old row's data, position and (for cached rows) storage size.
 *
 * @param oldrow the deleted row to restore
 * @throws HsqlException wrapping any IOException from the store
 */
Row restoreRow(Row oldrow) throws HsqlException {

    Row row;

    try {
        if (isMemory) {
            row = new Row(this, oldrow.oData);

            row.setPos(oldrow.getPos());
        } else {
            row = CachedRow.newCachedRow(this, oldrow.oData);

            row.setStorageSize(oldrow.getStorageSize());
            row.setPos(oldrow.getPos());
            rowStore.restore(row);
        }
    } catch (IOException e) {
        // surface store I/O failures as HsqlException, preserving cause
        throw new HsqlException(
            e, Trace.getMessage(Trace.GENERAL_IO_ERROR),
            Trace.GENERAL_IO_ERROR);
    }

    return row;
}
/**
 * PersistentStore implementation that delegates row storage to the
 * enclosing table's cache. Used by CACHED and TEXT tables.
 *
 * Several methods deliberately swallow exceptions and return null / 0
 * or do nothing - callers treat these as best-effort operations.
 */
public class RowStore implements PersistentStore {

    /** Fetches the row at the given position; null if the read fails. */
    public CachedObject get(int i) {

        try {
            return cache.get(i, this, false);
        } catch (HsqlException e) {
            // best-effort read: callers handle null
            return null;
        }
    }

    /** As get(int), but pins the object in the cache ("keep"). */
    public CachedObject getKeep(int i) {

        try {
            return cache.get(i, this, true);
        } catch (HsqlException e) {
            return null;
        }
    }

    /** Storage size of the row at the given position; 0 on failure. */
    public int getStorageSize(int i) {

        try {
            return cache.get(i, this, false).getStorageSize();
        } catch (HsqlException e) {
            return 0;
        }
    }

    /** Adds a new row to the cache. */
    public void add(CachedObject row) throws IOException {
        cache.add(row);
    }

    /** Restores a previously removed row into the cache. */
    public void restore(CachedObject row) throws IOException {
        cache.restore(row);
    }

    /**
     * Reconstructs a row from its serialized form; TEXT tables get a
     * CachedDataRow, others a CachedRow. Returns null on any failure.
     */
    public CachedObject get(RowInputBinary in) {

        try {
            if (Table.this.isText) {
                return new CachedDataRow(Table.this, in);
            }

            CachedObject row = new CachedRow(Table.this, in);

            return row;
        } catch (HsqlException e) {
            return null;
        } catch (IOException e) {
            return null;
        }
    }

    /** Not used by this store - always returns null. */
    public CachedObject getNewInstance(int size) {
        return null;
    }

    /** Removes the row at the given position; I/O errors are ignored. */
    public void remove(int i) {

        try {
            cache.remove(i, this);
        } catch (IOException e) {}
    }

    /** Removes the row's persisted form; I/O errors are ignored. */
    public void removePersistence(int i) {

        try {
            cache.removePersistence(i, this);
        } catch (IOException e) {

            //
        }
    }

    /** Releases the cache slot at the given position. */
    public void release(int i) {
        cache.release(i);
    }

    /**
     * Persists a committed row; only TEXT tables write anything here.
     * I/O errors are ignored.
     */
    public void commit(CachedObject row) {

        try {
            if (Table.this.isText) {
                cache.saveRow(row);
            }
        } catch (IOException e) {

            //
        }
    }
}
}