X-Git-Url: https://git.tokkee.org/?a=blobdiff_plain;f=roundup%2Fbackends%2Frdbms_common.py;h=45078c44b0f3237e3bdcf31e4c4c89fdbcf53f94;hb=2bc6db4f83572c0b1cabbf31bb8b614d5d3a5e15;hp=77d02e7a9ca609988acf3c9629e880990b5a6ef5;hpb=5a59d241edbc71552748dd3fc7977a3f7d72322f;p=roundup.git diff --git a/roundup/backends/rdbms_common.py b/roundup/backends/rdbms_common.py index 77d02e7..45078c4 100644 --- a/roundup/backends/rdbms_common.py +++ b/roundup/backends/rdbms_common.py @@ -1,4 +1,4 @@ -# $Id: rdbms_common.py,v 1.50 2003-03-26 05:28:32 richard Exp $ +# $Id: rdbms_common.py,v 1.72 2003-12-05 09:47:46 richard Exp $ ''' Relational database (SQL) backend common code. Basics: @@ -69,13 +69,13 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): self.lockfile = None # open a connection to the database, creating the "conn" attribute - self.open_connection() + self.sql_open_connection() def clearCache(self): self.cache = {} self.cache_lru = [] - def open_connection(self): + def sql_open_connection(self): ''' Open a connection to the database, creating it if necessary ''' raise NotImplemented @@ -93,7 +93,12 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): def sql_fetchone(self): ''' Fetch a single row. If there's nothing to fetch, return None. ''' - raise NotImplemented + return self.cursor.fetchone() + + def sql_fetchall(self): + ''' Fetch all rows. If there's nothing to fetch, return []. + ''' + return self.cursor.fetchall() def sql_stringquote(self, value): ''' Quote the string so it's safe to put in the 'sql quotes' @@ -103,12 +108,14 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): def save_dbschema(self, schema): ''' Save the schema definition that the database currently implements ''' - raise NotImplemented + s = repr(self.database_schema) + self.sql('insert into schema values (%s)', (s,)) def load_dbschema(self): ''' Load the schema definition that the database currently implements ''' - raise NotImplemented + self.cursor.execute('select schema from schema') + return eval(self.cursor.fetchone()[0]) def post_init(self): ''' Called once the schema initialisation has finished. @@ -129,9 +136,11 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): self.database_schema[classname] = spec.schema() save = 1 - for classname in self.database_schema.keys(): + for classname, spec in self.database_schema.items(): if not self.classes.has_key(classname): - self.drop_class(classname) + self.drop_class(classname, spec) + del self.database_schema[classname] + save = 1 # update the database version of the schema if save: @@ -145,14 +154,8 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): # commit self.conn.commit() - # figure the "curuserid" - if self.journaltag is None: - self.curuserid = None - elif self.journaltag == 'admin': - # admin user may not exist, but always has ID 1 - self.curuserid = '1' - else: - self.curuserid = self.user.lookup(self.journaltag) + def refresh_database(self): + self.post_init() def reindex(self): for klass in self.classes.values(): @@ -179,41 +182,36 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): cols.sort() return cols, mls - def update_class(self, spec, old_spec): + def update_class(self, spec, old_spec, force=0): ''' Determine the differences between the current spec and the - database version of the spec, and update where necessary - ''' - new_spec = spec - new_has = new_spec.properties.has_key + database version of the spec, and update where necessary. 
- new_spec = new_spec.schema() + If 'force' is true, update the database anyway. + ''' + new_has = spec.properties.has_key + new_spec = spec.schema() new_spec[1].sort() old_spec[1].sort() - if new_spec == old_spec: + if not force and new_spec == old_spec: # no changes return 0 if __debug__: print >>hyperdb.DEBUG, 'update_class FIRING' - # key property changed? - if old_spec[0] != new_spec[0]: - if __debug__: - print >>hyperdb.DEBUG, 'update_class setting keyprop', `spec[0]` - # XXX turn on indexing for the key property - # detect multilinks that have been removed, and drop their table old_has = {} for name,prop in old_spec[1]: old_has[name] = 1 - if not new_has(name) and isinstance(prop, Multilink): - # it's a multilink, and it's been removed - drop the old - # table - sql = 'drop table %s_%s'%(spec.classname, prop) - if __debug__: - print >>hyperdb.DEBUG, 'update_class', (self, sql) - self.cursor.execute(sql) + if new_has(name) or not isinstance(prop, Multilink): continue + # it's a multilink, and it's been removed - drop the old + # table. First drop indexes. + self.drop_multilink_table_indexes(spec.classname, ml) + sql = 'drop table %s_%s'%(spec.classname, prop) + if __debug__: + print >>hyperdb.DEBUG, 'update_class', (self, sql) + self.cursor.execute(sql) old_has = old_has.has_key # now figure how we populate the new table @@ -222,7 +220,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): for propname,x in new_spec[1]: prop = properties[propname] if isinstance(prop, Multilink): - if not old_has(propname): + if force or not old_has(propname): # we need to create the new table self.create_multilink_table(spec, propname) elif old_has(propname): @@ -240,6 +238,9 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): self.cursor.execute(sql) olddata = self.cursor.fetchall() + # TODO: update all the other index dropping code + self.drop_class_table_indexes(cn, old_spec[0]) + # drop the old table self.cursor.execute('drop table _%s'%cn) @@ -273,8 +274,56 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): print >>hyperdb.DEBUG, 'create_class', (self, sql) self.cursor.execute(sql) + self.create_class_table_indexes(spec) + return cols, mls + def create_class_table_indexes(self, spec): + ''' create the class table for the given spec + ''' + # create id index + index_sql1 = 'create index _%s_id_idx on _%s(id)'%( + spec.classname, spec.classname) + if __debug__: + print >>hyperdb.DEBUG, 'create_index', (self, index_sql1) + self.cursor.execute(index_sql1) + + # create __retired__ index + index_sql2 = 'create index _%s_retired_idx on _%s(__retired__)'%( + spec.classname, spec.classname) + if __debug__: + print >>hyperdb.DEBUG, 'create_index', (self, index_sql2) + self.cursor.execute(index_sql2) + + # create index for key property + if spec.key: + if __debug__: + print >>hyperdb.DEBUG, 'update_class setting keyprop %r'% \ + spec.key + index_sql3 = 'create index _%s_%s_idx on _%s(_%s)'%( + spec.classname, spec.key, + spec.classname, spec.key) + if __debug__: + print >>hyperdb.DEBUG, 'create_index', (self, index_sql3) + self.cursor.execute(index_sql3) + + def drop_class_table_indexes(self, cn, key): + # drop the old table indexes first + l = ['_%s_id_idx'%cn, '_%s_retired_idx'%cn] + if key: + # key prop too? 
+ l.append('_%s_%s_idx'%(cn, key)) + + # TODO: update all the other index dropping code + table_name = '_%s'%cn + for index_name in l: + if not self.sql_index_exists(table_name, index_name): + continue + index_sql = 'drop index '+index_name + if __debug__: + print >>hyperdb.DEBUG, 'drop_index', (self, index_sql) + self.cursor.execute(index_sql) + def create_journal_table(self, spec): ''' create the journal table for a class given the spec and already-determined cols @@ -286,16 +335,65 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): if __debug__: print >>hyperdb.DEBUG, 'create_class', (self, sql) self.cursor.execute(sql) + self.create_journal_table_indexes(spec) + + def create_journal_table_indexes(self, spec): + # index on nodeid + index_sql = 'create index %s_journ_idx on %s__journal(nodeid)'%( + spec.classname, spec.classname) + if __debug__: + print >>hyperdb.DEBUG, 'create_index', (self, index_sql) + self.cursor.execute(index_sql) + + def drop_journal_table_indexes(self, classname): + index_name = '%s_journ_idx'%classname + if not self.sql_index_exists('%s__journal'%classname, index_name): + return + index_sql = 'drop index '+index_name + if __debug__: + print >>hyperdb.DEBUG, 'drop_index', (self, index_sql) + self.cursor.execute(index_sql) def create_multilink_table(self, spec, ml): ''' Create a multilink table for the "ml" property of the class given by the spec ''' + # create the table sql = 'create table %s_%s (linkid varchar, nodeid varchar)'%( spec.classname, ml) if __debug__: print >>hyperdb.DEBUG, 'create_class', (self, sql) self.cursor.execute(sql) + self.create_multilink_table_indexes(spec, ml) + + def create_multilink_table_indexes(self, spec, ml): + # create index on linkid + index_sql = 'create index %s_%s_l_idx on %s_%s(linkid)'%( + spec.classname, ml, spec.classname, ml) + if __debug__: + print >>hyperdb.DEBUG, 'create_index', (self, index_sql) + self.cursor.execute(index_sql) + + # create index on nodeid + index_sql = 'create index %s_%s_n_idx on %s_%s(nodeid)'%( + spec.classname, ml, spec.classname, ml) + if __debug__: + print >>hyperdb.DEBUG, 'create_index', (self, index_sql) + self.cursor.execute(index_sql) + + def drop_multilink_table_indexes(self, classname, ml): + l = [ + '%s_%s_l_idx'%(classname, ml), + '%s_%s_n_idx'%(classname, ml) + ] + table_name = '%s_%s'%(classname, ml) + for index_name in l: + if not self.sql_index_exists(table_name, index_name): + continue + index_sql = 'drop index %s'%index_name + if __debug__: + print >>hyperdb.DEBUG, 'drop_index', (self, index_sql) + self.cursor.execute(index_sql) def create_class(self, spec): ''' Create a database table according to the given spec. @@ -314,28 +412,35 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): print >>hyperdb.DEBUG, 'create_class', (self, sql, vals) self.cursor.execute(sql, vals) - def drop_class(self, spec): + def drop_class(self, cn, spec): ''' Drop the given table from the database. Drop the journal and multilink tables too. 
''' + properties = spec[1] # figure the multilinks mls = [] - for col, prop in spec.properties.items(): + for propanme, prop in properties: if isinstance(prop, Multilink): - mls.append(col) + mls.append(propname) - sql = 'drop table _%s'%spec.classname + # drop class table and indexes + self.drop_class_table_indexes(cn, spec[0]) + sql = 'drop table _%s'%cn if __debug__: print >>hyperdb.DEBUG, 'drop_class', (self, sql) self.cursor.execute(sql) - sql = 'drop table %s__journal'%spec.classname + # drop journal table and indexes + self.drop_journal_table_indexes(cn) + sql = 'drop table %s__journal'%cn if __debug__: print >>hyperdb.DEBUG, 'drop_class', (self, sql) self.cursor.execute(sql) for ml in mls: + # drop multilink table and indexes + self.drop_multilink_table_indexes(cn, ml) sql = 'drop table %s_%s'%(spec.classname, ml) if __debug__: print >>hyperdb.DEBUG, 'drop_class', (self, sql) @@ -449,7 +554,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): # calling code's node assumptions) node = node.copy() node['creation'] = node['activity'] = date.Date() - node['creator'] = self.curuserid + node['creator'] = self.getuid() # default the non-multilink columns for col, prop in cl.properties.items(): @@ -637,7 +742,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): for col in mls: # get the link ids sql = 'delete from %s_%s where nodeid=%s'%(classname, col, self.arg) - self.cursor.execute(sql, (nodeid,)) + self.sql(sql, (nodeid,)) # remove journal entries sql = 'delete from %s__journal where nodeid=%s'%(classname, self.arg) @@ -696,8 +801,14 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): p = password.Password() p.unpack(v) d[k] = p - elif (isinstance(prop, Boolean) or isinstance(prop, Number)) and v is not None: - d[k]=float(v) + elif isinstance(prop, Boolean) and v is not None: + d[k] = int(v) + elif isinstance(prop, Number) and v is not None: + # try int first, then assume it's a float + try: + d[k] = int(v) + except ValueError: + d[k] = float(v) else: d[k] = v return d @@ -739,7 +850,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): if creator: journaltag = creator else: - journaltag = self.curuserid + journaltag = self.getuid() if creation: journaldate = creation.serialise() else: @@ -755,12 +866,6 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): self.save_journal(classname, cols, nodeid, journaldate, journaltag, action, params) - def save_journal(self, classname, cols, nodeid, journaldate, - journaltag, action, params): - ''' Save the journal entry to the database - ''' - raise NotImplemented - def getjournal(self, classname, nodeid): ''' get the journal for id ''' @@ -771,10 +876,36 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): cols = ','.join('nodeid date tag action params'.split()) return self.load_journal(classname, cols, nodeid) + def save_journal(self, classname, cols, nodeid, journaldate, + journaltag, action, params): + ''' Save the journal entry to the database + ''' + # make the params db-friendly + params = repr(params) + entry = (nodeid, journaldate, journaltag, action, params) + + # do the insert + a = self.arg + sql = 'insert into %s__journal (%s) values (%s,%s,%s,%s,%s)'%(classname, + cols, a, a, a, a, a) + if __debug__: + print >>hyperdb.DEBUG, 'addjournal', (self, sql, entry) + self.cursor.execute(sql, entry) + def load_journal(self, classname, cols, nodeid): ''' Load the journal from the database ''' - raise NotImplemented + # now get the 
journal entries + sql = 'select %s from %s__journal where nodeid=%s'%(cols, classname, + self.arg) + if __debug__: + print >>hyperdb.DEBUG, 'load_journal', (self, sql, nodeid) + self.cursor.execute(sql, (nodeid,)) + res = [] + for nodeid, date_stamp, user, action, params in self.cursor.fetchall(): + params = eval(params) + res.append((nodeid, date.Date(date_stamp), user, action, params)) + return res def pack(self, pack_before): ''' Delete all journal entries except "create" before 'pack_before'. @@ -823,6 +954,9 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): # clear out the transactions self.transactions = [] + def sql_rollback(self): + self.conn.rollback() + def rollback(self): ''' Reverse all actions from the current transaction. @@ -832,8 +966,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): if __debug__: print >>hyperdb.DEBUG, 'rollback', (self,) - # roll back - self.conn.rollback() + self.sql_rollback() # roll back "other" transaction stuff for method, args in self.transactions: @@ -851,10 +984,13 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database): # return the classname, nodeid so we reindex this content return (classname, nodeid) + def sql_close(self): + self.conn.close() + def close(self): ''' Close off the connection. ''' - self.conn.close() + self.sql_close() if self.lockfile is not None: locking.release_lock(self.lockfile) if self.lockfile is not None: @@ -1088,7 +1224,7 @@ class Class(hyperdb.Class): elif isinstance(proptype, hyperdb.Password): value = str(value) l.append(repr(value)) - l.append(self.is_retired(nodeid)) + l.append(repr(self.is_retired(nodeid))) return l def import_list(self, propnames, proplist): @@ -1123,6 +1259,9 @@ class Class(hyperdb.Class): if int(value): retire = 1 continue + elif value is None: + d[propname] = None + continue prop = properties[propname] if value is None: @@ -1142,6 +1281,9 @@ class Class(hyperdb.Class): if newid is None: newid = self.db.newid(self.classname) + # add the node and journal + self.db.addnode(self.classname, newid, d) + # retire? if retire: # use the arg for __retired__ to cope with any odd database type @@ -1152,9 +1294,6 @@ class Class(hyperdb.Class): print >>hyperdb.DEBUG, 'retire', (self, sql, newid) self.db.cursor.execute(sql, (1, newid)) - # add the node and journal - self.db.addnode(self.classname, newid, d) - # extract the extraneous journalling gumpf and nuke it if d.has_key('creator'): creator = d['creator'] @@ -1180,10 +1319,7 @@ class Class(hyperdb.Class): IndexError is raised. 'propname' must be the name of a property of this class or a KeyError is raised. - 'cache' indicates whether the transaction cache should be queried - for the node. If the node has been modified and you need to - determine what its values prior to modification are, you need to - set cache=0. + 'cache' exists for backwards compatibility, and is not used. ''' if propname == 'id': return nodeid @@ -1205,7 +1341,7 @@ class Class(hyperdb.Class): if d.has_key('creator'): return d['creator'] else: - return self.db.curuserid + return self.db.getuid() # get the property (raises KeyErorr if invalid) prop = self.properties[propname] @@ -1225,19 +1361,6 @@ class Class(hyperdb.Class): return d[propname] - def getnode(self, nodeid, cache=1): - ''' Return a convenience wrapper for the node. - - 'nodeid' must be the id of an existing node of this class or an - IndexError is raised. - - 'cache' indicates whether the transaction cache should be queried - for the node. 
If the node has been modified and you need to - determine what its values prior to modification are, you need to - set cache=0. - ''' - return Node(self, nodeid, cache=cache) - def set(self, nodeid, **propvalues): '''Modify a property on an existing node of this class. @@ -1479,7 +1602,7 @@ class Class(hyperdb.Class): self.fireReactors('retire', nodeid, None) def restore(self, nodeid): - '''Restpre a retired node. + '''Restore a retired node. Make node available for all operations like it was before retirement. ''' @@ -1571,7 +1694,8 @@ class Class(hyperdb.Class): None, or a TypeError is raised. The values of the key property on all existing nodes must be unique or a ValueError is raised. ''' - # XXX create an index on the key prop column + # XXX create an index on the key prop column. We should also + # record that we've created this index in the schema somewhere. prop = self.getprops()[propname] if not isinstance(prop, String): raise TypeError, 'key properties must be String' @@ -1637,8 +1761,8 @@ class Class(hyperdb.Class): 'propspec' consists of keyword args propname=nodeid or propname={nodeid:1, } 'propname' must be the name of a property in this class, or a - KeyError is raised. That property must be a Link or Multilink - property, or a TypeError is raised. + KeyError is raised. That property must be a Link or + Multilink property, or a TypeError is raised. Any node in this class whose 'propname' property links to any of the nodeids will be returned. Used by the full text indexing, which knows @@ -1664,15 +1788,19 @@ class Class(hyperdb.Class): raise TypeError, "'%s' not a Link/Multilink property"%propname # first, links - where = [] - allvalues = () + where = ['__retired__ = %s'] + allvalues = (0,) a = self.db.arg for prop, values in propspec: if not isinstance(props[prop], hyperdb.Link): continue + if type(values) is type({}) and len(values) == 1: + values = values.keys()[0] if type(values) is type(''): allvalues += (values,) where.append('_%s = %s'%(prop, a)) + elif values is None: + where.append('_%s is NULL'%prop) else: allvalues += tuple(values.keys()) where.append('_%s in (%s)'%(prop, ','.join([a]*len(values)))) @@ -1712,14 +1840,16 @@ class Class(hyperdb.Class): args = [] for propname in requirements.keys(): prop = self.properties[propname] - if isinstance(not prop, String): + if not isinstance(prop, String): raise TypeError, "'%s' not a String property"%propname where.append(propname) args.append(requirements[propname].lower()) # generate the where clause s = ' and '.join(['lower(_%s)=%s'%(col, self.db.arg) for col in where]) - sql = 'select id from _%s where %s'%(self.classname, s) + sql = 'select id from _%s where %s and __retired__=%s'%(self.classname, + s, self.db.arg) + args.append(0) self.db.sql(sql, tuple(args)) l = [x[0] for x in self.db.sql_fetchall()] if __debug__: @@ -1737,10 +1867,12 @@ class Class(hyperdb.Class): Set retired=None to get all nodes. Otherwise it'll get all the retired or non-retired nodes, depending on the flag. 
''' - # flip the sense of the flag if we don't want all of them + # flip the sense of the 'retired' flag if we don't want all of them if retired is not None: - retired = not retired - args = (retired, ) + if retired: + args = (0, ) + else: + args = (1, ) sql = 'select id from _%s where __retired__ <> %s'%(self.classname, self.db.arg) else: @@ -1781,7 +1913,6 @@ class Class(hyperdb.Class): where = [] args = [] a = self.db.arg - print filterspec for k, v in filterspec.items(): propclass = props[k] # now do other where clause stuff @@ -1823,6 +1954,7 @@ class Class(hyperdb.Class): elif isinstance(propclass, Link): if isinstance(v, type([])): if '-1' in v: + v = v[:] v.remove('-1') xtra = ' or _%s is NULL'%k else: @@ -1850,10 +1982,10 @@ class Class(hyperdb.Class): # Try to filter on range of dates date_rng = Range(v, date.Date, offset=timezone) if (date_rng.from_value): - where.append('_%s > %s'%(k, a)) + where.append('_%s >= %s'%(k, a)) args.append(date_rng.from_value.serialise()) if (date_rng.to_value): - where.append('_%s < %s'%(k, a)) + where.append('_%s <= %s'%(k, a)) args.append(date_rng.to_value.serialise()) except ValueError: # If range creation fails - ignore that search parameter @@ -1864,8 +1996,20 @@ class Class(hyperdb.Class): where.append('_%s in (%s)'%(k, s)) args = args + [date.Interval(x).serialise() for x in v] else: - where.append('_%s=%s'%(k, a)) - args.append(date.Interval(v).serialise()) + try: + # Try to filter on range of intervals + date_rng = Range(v, date.Interval) + if (date_rng.from_value): + where.append('_%s >= %s'%(k, a)) + args.append(date_rng.from_value.serialise()) + if (date_rng.to_value): + where.append('_%s <= %s'%(k, a)) + args.append(date_rng.to_value.serialise()) + except ValueError: + # If range creation fails - ignore that search parameter + pass + #where.append('_%s=%s'%(k, a)) + #args.append(date.Interval(v).serialise()) else: if isinstance(v, type([])): s = ','.join([a for x in v]) @@ -1875,6 +2019,9 @@ class Class(hyperdb.Class): where.append('_%s=%s'%(k, a)) args.append(v) + # don't match retired nodes + where.append('__retired__ <> 1') + # add results of full text search if search_matches is not None: v = search_matches.keys() @@ -1929,14 +2076,15 @@ class Class(hyperdb.Class): args = tuple(args) if __debug__: print >>hyperdb.DEBUG, 'filter', (self, sql, args) - self.db.cursor.execute(sql, args) - l = self.db.cursor.fetchall() - print sql, l + if args: + self.db.cursor.execute(sql, args) + else: + # psycopg doesn't like empty args + self.db.cursor.execute(sql) + l = self.db.sql_fetchall() # return the IDs (the first column) - # XXX The filter(None, l) bit is sqlite-specific... if there's _NO_ - # XXX matches to a fetch, it returns NULL instead of nothing!?! - return filter(None, [row[0] for row in l]) + return [row[0] for row in l] def count(self): '''Get the number of nodes in this class. @@ -2073,7 +2221,9 @@ class FileClass(Class, hyperdb.FileClass): _marker = [] def get(self, nodeid, propname, default=_marker, cache=1): - ''' trap the content propname and get it from the file + ''' Trap the content propname and get it from the file + + 'cache' exists for backwards compatibility, and is not used. ''' poss_msg = 'Possibly a access right configuration problem.' 
if propname == 'content': @@ -2084,9 +2234,9 @@ class FileClass(Class, hyperdb.FileClass): return 'ERROR reading file: %s%s\n%s\n%s'%( self.classname, nodeid, poss_msg, strerror) if default is not self._marker: - return Class.get(self, nodeid, propname, default, cache=cache) + return Class.get(self, nodeid, propname, default) else: - return Class.get(self, nodeid, propname, cache=cache) + return Class.get(self, nodeid, propname) def getprops(self, protected=1): ''' In addition to the actual properties on the node, these methods
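
The bulk of this change introduces per-table index management: each class table gets an index on id and __retired__ (plus the key property when one is set), journal tables get an index on nodeid, and multilink tables get indexes on linkid and nodeid, with drops guarded by sql_index_exists so a missing index is not an error. Below is a minimal standalone sketch of that convention, using Python's sqlite3 in place of Roundup's cursor plumbing and a hypothetical index_exists() helper; the names and table layout are illustrative assumptions, not the backend's actual API.

    import sqlite3

    def index_exists(cursor, index_name):
        # sqlite-specific existence check; each real backend supplies its own
        cursor.execute("select name from sqlite_master "
                       "where type='index' and name=?", (index_name,))
        return cursor.fetchone() is not None

    def create_class_indexes(cursor, classname, key=None):
        # same naming convention as the patch: _<class>_<col>_idx on table _<class>
        cursor.execute('create index _%s_id_idx on _%s(id)'%(classname, classname))
        cursor.execute('create index _%s_retired_idx on _%s(__retired__)'%(
            classname, classname))
        if key:
            # index the key property column as well
            cursor.execute('create index _%s_%s_idx on _%s(_%s)'%(
                classname, key, classname, key))

    def drop_class_indexes(cursor, classname, key=None):
        names = ['_%s_id_idx'%classname, '_%s_retired_idx'%classname]
        if key:
            names.append('_%s_%s_idx'%(classname, key))
        for name in names:
            if not index_exists(cursor, name):
                continue        # guarded drop, as in drop_class_table_indexes()
            cursor.execute('drop index '+name)

    if __name__ == '__main__':
        conn = sqlite3.connect(':memory:')
        cur = conn.cursor()
        cur.execute('create table _issue '
                    '(id varchar, __retired__ varchar, _title varchar)')
        create_class_indexes(cur, 'issue', key='title')
        drop_class_indexes(cur, 'issue', key='title')
        conn.commit()

The guarded drop is what lets update_class and drop_class run safely against databases created before this patch, where the indexes may simply not exist yet.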