index 7388dbcb88d6807e35712e3ef049e3b441d04b9e..4c5760cdc56d4f3cb4594cb3c8a6ea0395460fc4 100644 (file)
-# $Id: rdbms_common.py,v 1.39 2003-03-06 06:03:51 richard Exp $
+# $Id: rdbms_common.py,v 1.66 2003-10-25 22:53:26 richard Exp $
''' Relational database (SQL) backend common code.
Basics:
sql_* methods, since everything else should be fairly generic. There's
probably a bit of work to be done if a database is used that actually
honors column typing, since the initial databases don't (sqlite stores
-everything as a string, and gadfly stores anything that's marsallable).
+everything as a string.)
'''
# standard python modules
from blobfiles import FileStorage
from roundup.indexer import Indexer
from sessions import Sessions, OneTimeKeys
+from roundup.date import Range
# number of rows to keep in memory
ROW_CACHE_SIZE = 100
self.database_schema[classname] = spec.schema()
save = 1
- for classname in self.database_schema.keys():
+ for classname, spec in self.database_schema.items():
if not self.classes.has_key(classname):
- self.drop_class(classname)
+ self.drop_class(classname, spec)
+ del self.database_schema[classname]
+ save = 1
# update the database version of the schema
if save:
# commit
self.conn.commit()
- # figure the "curuserid"
- if self.journaltag is None:
- self.curuserid = None
- elif self.journaltag == 'admin':
- # admin user may not exist, but always has ID 1
- self.curuserid = '1'
- else:
- self.curuserid = self.user.lookup(self.journaltag)
+ def refresh_database(self):
+ # now detect changes in the schema
+ for classname, spec in self.classes.items():
+ dbspec = self.database_schema[classname]
+ self.update_class(spec, dbspec, force=1)
+ self.database_schema[classname] = spec.schema()
+ # update the database version of the schema
+ self.sql('delete from schema')
+ self.save_dbschema(self.database_schema)
+ # reindex the db
+ self.reindex()
+ # commit
+ self.conn.commit()
+
def reindex(self):
for klass in self.classes.values():
cols.sort()
return cols, mls
- def update_class(self, spec, old_spec):
+ def update_class(self, spec, old_spec, force=0):
''' Determine the differences between the current spec and the
- database version of the spec, and update where necessary
- '''
- new_spec = spec
- new_has = new_spec.properties.has_key
-
- new_spec = new_spec.schema()
- if new_spec == old_spec:
+ database version of the spec, and update where necessary.
+ If 'force' is true, update the database anyway.
+ '''
+ new_has = spec.properties.has_key
+ new_spec = spec.schema()
+ new_spec[1].sort()
+ old_spec[1].sort()
+ if not force and new_spec == old_spec:
# no changes
return 0
if __debug__:
print >>hyperdb.DEBUG, 'update_class FIRING'
- # key property changed?
- if old_spec[0] != new_spec[0]:
- if __debug__:
- print >>hyperdb.DEBUG, 'update_class setting keyprop', `spec[0]`
- # XXX turn on indexing for the key property
-
# detect multilinks that have been removed, and drop their table
old_has = {}
for name,prop in old_spec[1]:
old_has[name] = 1
- if not new_has(name) and isinstance(prop, Multilink):
+ if (force or not new_has(name)) and isinstance(prop, Multilink):
# it's a multilink, and it's been removed - drop the old
- # table
+ # table. First drop indexes.
+                index_sqls = [ 'drop index %s_%s_l_idx'%(spec.classname, name),
+                               'drop index %s_%s_n_idx'%(spec.classname, name) ]
+ for index_sql in index_sqls:
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ try:
+ self.cursor.execute(index_sql)
+ except:
+ # The database may not actually have any indexes.
+ # assume the worst.
+ pass
sql = 'drop table %s_%s'%(spec.classname, prop)
if __debug__:
print >>hyperdb.DEBUG, 'update_class', (self, sql)
old_has = old_has.has_key
# now figure how we populate the new table
- fetch = [] # fetch these from the old table
+ fetch = ['_activity', '_creation', '_creator']
properties = spec.getprops()
for propname,x in new_spec[1]:
prop = properties[propname]
if isinstance(prop, Multilink):
- if not old_has(propname):
+ if force or not old_has(propname):
# we need to create the new table
self.create_multilink_table(spec, propname)
elif old_has(propname):
self.cursor.execute(sql)
olddata = self.cursor.fetchall()
+ # drop the old table indexes first
+ index_sqls = [ 'drop index _%s_id_idx'%cn,
+ 'drop index _%s_retired_idx'%cn ]
+ if old_spec[0]:
+ index_sqls.append('drop index _%s_%s_idx'%(cn, old_spec[0]))
+ for index_sql in index_sqls:
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ try:
+ self.cursor.execute(index_sql)
+ except:
+ # The database may not actually have any indexes.
+ # assume the worst.
+ pass
+
# drop the old table
self.cursor.execute('drop table _%s'%cn)
if __debug__:
print >>hyperdb.DEBUG, 'update_class', (self, sql, olddata[0])
for entry in olddata:
- self.cursor.execute(sql, *entry)
+ self.cursor.execute(sql, tuple(entry))
return 1
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ # create id index
+ index_sql1 = 'create index _%s_id_idx on _%s(id)'%(
+ spec.classname, spec.classname)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'create_index', (self, index_sql1)
+ self.cursor.execute(index_sql1)
+
+ # create __retired__ index
+ index_sql2 = 'create index _%s_retired_idx on _%s(__retired__)'%(
+ spec.classname, spec.classname)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'create_index', (self, index_sql2)
+ self.cursor.execute(index_sql2)
+
+ # create index for key property
+ if spec.key:
+ if __debug__:
+ print >>hyperdb.DEBUG, 'update_class setting keyprop %r'% \
+ spec.key
+ index_sql3 = 'create index _%s_%s_idx on _%s(_%s)'%(
+ spec.classname, spec.key,
+ spec.classname, spec.key)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'create_index', (self, index_sql3)
+ self.cursor.execute(index_sql3)
+
return cols, mls
def create_journal_table(self, spec):
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ # index on nodeid
+ index_sql = 'create index %s_journ_idx on %s__journal(nodeid)'%(
+ spec.classname, spec.classname)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'create_index', (self, index_sql)
+ self.cursor.execute(index_sql)
+
def create_multilink_table(self, spec, ml):
''' Create a multilink table for the "ml" property of the class
given by the spec
'''
+ # create the table
sql = 'create table %s_%s (linkid varchar, nodeid varchar)'%(
spec.classname, ml)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ # create index on linkid
+ index_sql = 'create index %s_%s_l_idx on %s_%s(linkid)'%(
+ spec.classname, ml, spec.classname, ml)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'create_index', (self, index_sql)
+ self.cursor.execute(index_sql)
+
+ # create index on nodeid
+ index_sql = 'create index %s_%s_n_idx on %s_%s(nodeid)'%(
+ spec.classname, ml, spec.classname, ml)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'create_index', (self, index_sql)
+ self.cursor.execute(index_sql)
+
def create_class(self, spec):
''' Create a database table according to the given spec.
'''
print >>hyperdb.DEBUG, 'create_class', (self, sql, vals)
self.cursor.execute(sql, vals)
- def drop_class(self, spec):
+ def drop_class(self, cn, spec):
''' Drop the given table from the database.
Drop the journal and multilink tables too.
'''
+ properties = spec[1]
# figure the multilinks
mls = []
- for col, prop in spec.properties.items():
+        for propname, prop in properties:
if isinstance(prop, Multilink):
- mls.append(col)
+ mls.append(propname)
+
+ index_sqls = [ 'drop index _%s_id_idx'%cn,
+ 'drop index _%s_retired_idx'%cn,
+ 'drop index %s_journ_idx'%cn ]
+ if spec[0]:
+ index_sqls.append('drop index _%s_%s_idx'%(cn, spec[0]))
+ for index_sql in index_sqls:
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ try:
+ self.cursor.execute(index_sql)
+ except:
+ # The database may not actually have any indexes.
+ # assume the worst.
+ pass
- sql = 'drop table _%s'%spec.classname
+ sql = 'drop table _%s'%cn
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
self.cursor.execute(sql)
- sql = 'drop table %s__journal'%spec.classname
+ sql = 'drop table %s__journal'%cn
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
self.cursor.execute(sql)
for ml in mls:
+ index_sqls = [
+ 'drop index %s_%s_n_idx'%(cn, ml),
+ 'drop index %s_%s_l_idx'%(cn, ml),
+ ]
+ for index_sql in index_sqls:
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ try:
+ self.cursor.execute(index_sql)
+ except:
+ # The database may not actually have any indexes.
+ # assume the worst.
+ pass
-            sql = 'drop table %s_%s'%(spec.classname, ml)
+            sql = 'drop table %s_%s'%(cn, ml)
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
#
# Nodes
#
-
def addnode(self, classname, nodeid, node):
''' Add the specified node to its class's db.
'''
if __debug__:
print >>hyperdb.DEBUG, 'addnode', (self, classname, nodeid, node)
- # gadfly requires values for all non-multilink columns
+
+ # determine the column definitions and multilink tables
cl = self.classes[classname]
cols, mls = self.determine_columns(cl.properties.items())
# calling code's node assumptions)
node = node.copy()
node['creation'] = node['activity'] = date.Date()
- node['creator'] = self.curuserid
+ node['creator'] = self.getuid()
# default the non-multilink columns
for col, prop in cl.properties.items():
- if not isinstance(col, Multilink):
- if not node.has_key(col):
+ if not node.has_key(col):
+ if isinstance(prop, Multilink):
+ node[col] = []
+ else:
node[col] = None
# clear this node out of the cache if it's in there
if creator:
journaltag = creator
else:
- journaltag = self.curuserid
+ journaltag = self.getuid()
if creation:
journaldate = creation.serialise()
else:
# do the db-related init stuff
db.addclass(self)
- self.auditors = {'create': [], 'set': [], 'retire': []}
- self.reactors = {'create': [], 'set': [], 'retire': []}
+ self.auditors = {'create': [], 'set': [], 'retire': [], 'restore': []}
+ self.reactors = {'create': [], 'set': [], 'retire': [], 'restore': []}
def schema(self):
''' A dumpable version of the schema that we can store in the
# make the new node's property map
d = {}
+ retire = 0
+ newid = None
for i in range(len(propnames)):
# Use eval to reverse the repr() used to output the CSV
value = eval(proplist[i])
# Figure the property for this column
propname = propnames[i]
- prop = properties[propname]
# "unmarshal" where necessary
if propname == 'id':
newid = value
continue
+ elif propname == 'is retired':
+ # is the item retired?
+ if int(value):
+ retire = 1
+ continue
elif value is None:
+ d[propname] = None
+ continue
+
+ prop = properties[propname]
+ if value is None:
# don't set Nones
continue
elif isinstance(prop, hyperdb.Date):
value = pwd
d[propname] = value
+ # get a new id if necessary
+ if newid is None:
+ newid = self.db.newid(self.classname)
+
# retire?
- if int(proplist[-1]):
+ if retire:
# use the arg for __retired__ to cope with any odd database type
# conversion (hello, sqlite)
sql = 'update _%s set __retired__=%s where id=%s'%(self.classname,
IndexError is raised. 'propname' must be the name of a property
of this class or a KeyError is raised.
- 'cache' indicates whether the transaction cache should be queried
- for the node. If the node has been modified and you need to
- determine what its values prior to modification are, you need to
- set cache=0.
+ 'cache' exists for backwards compatibility, and is not used.
'''
if propname == 'id':
return nodeid
if d.has_key('creator'):
return d['creator']
else:
- return self.db.curuserid
+ return self.db.getuid()
# get the property (raises KeyErorr if invalid)
prop = self.properties[propname]
'nodeid' must be the id of an existing node of this class or an
IndexError is raised.
- 'cache' indicates whether the transaction cache should be queried
- for the node. If the node has been modified and you need to
- determine what its values prior to modification are, you need to
- set cache=0.
+ 'cache' exists for backwards compatibility, and is not used.
'''
- return Node(self, nodeid, cache=cache)
+ return Node(self, nodeid)
def set(self, nodeid, **propvalues):
'''Modify a property on an existing node of this class.
if __debug__:
print >>hyperdb.DEBUG, 'retire', (self, sql, nodeid)
self.db.cursor.execute(sql, (1, nodeid))
+ if self.do_journal:
+ self.db.addjournal(self.classname, nodeid, 'retired', None)
self.fireReactors('retire', nodeid, None)
+ def restore(self, nodeid):
+ '''Restore a retired node.
+
+ Make node available for all operations like it was before retirement.
+ '''
+ if self.db.journaltag is None:
+ raise DatabaseError, 'Database open read-only'
+
+ node = self.db.getnode(self.classname, nodeid)
+ # check if key property was overrided
+ key = self.getkey()
+ try:
+ id = self.lookup(node[key])
+ except KeyError:
+ pass
+ else:
+            raise KeyError, "Key property (%s) of retired node clashes " \
+                "with existing one (%s)" % (key, node[key])
+
+ self.fireAuditors('restore', nodeid, None)
+ # use the arg for __retired__ to cope with any odd database type
+ # conversion (hello, sqlite)
+ sql = 'update _%s set __retired__=%s where id=%s'%(self.classname,
+ self.db.arg, self.db.arg)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'restore', (self, sql, nodeid)
+ self.db.cursor.execute(sql, (0, nodeid))
+ if self.do_journal:
+ self.db.addjournal(self.classname, nodeid, 'restored', None)
+
+ self.fireReactors('restore', nodeid, None)
+
def is_retired(self, nodeid):
-        '''Return true if the node is rerired
+        '''Return true if the node is retired
'''
None, or a TypeError is raised. The values of the key property on
all existing nodes must be unique or a ValueError is raised.
'''
- # XXX create an index on the key prop column
+ # XXX create an index on the key prop column. We should also
+ # record that we've created this index in the schema somewhere.
prop = self.getprops()[propname]
if not isinstance(prop, String):
raise TypeError, 'key properties must be String'
if type(values) is type(''):
allvalues += (values,)
where.append('_%s = %s'%(prop, a))
+ elif values is None:
+ where.append('_%s is NULL'%prop)
else:
allvalues += tuple(values.keys())
where.append('_%s in (%s)'%(prop, ','.join([a]*len(values))))
args.append(requirements[propname].lower())
# generate the where clause
- s = ' and '.join(['_%s=%s'%(col, self.db.arg) for col in where])
+ s = ' and '.join(['lower(_%s)=%s'%(col, self.db.arg) for col in where])
sql = 'select id from _%s where %s'%(self.classname, s)
self.db.sql(sql, tuple(args))
l = [x[0] for x in self.db.sql_fetchall()]
Set retired=None to get all nodes. Otherwise it'll get all the
retired or non-retired nodes, depending on the flag.
'''
- # flip the sense of the flag if we don't want all of them
+ # flip the sense of the 'retired' flag if we don't want all of them
if retired is not None:
- retired = not retired
- sql = 'select id from _%s where __retired__ <> %s'%(self.classname,
- self.db.arg)
+ if retired:
+ args = (0, )
+ else:
+ args = (1, )
+ sql = 'select id from _%s where __retired__ <> %s'%(self.classname,
+ self.db.arg)
+ else:
+ args = ()
+ sql = 'select id from _%s'%self.classname
if __debug__:
print >>hyperdb.DEBUG, 'getnodeids', (self, sql, retired)
- self.db.cursor.execute(sql, (retired,))
- return [x[0] for x in self.db.cursor.fetchall()]
+ self.db.cursor.execute(sql, args)
+ ids = [x[0] for x in self.db.cursor.fetchall()]
+ return ids
def filter(self, search_matches, filterspec, sort=(None,None),
group=(None,None)):
cn = self.classname
+ timezone = self.db.getUserTimezone()
+
# figure the WHERE clause from the filterspec
props = self.getprops()
frum = ['_'+cn]
# now do other where clause stuff
if isinstance(propclass, Multilink):
tn = '%s_%s'%(cn, k)
- frum.append(tn)
- if isinstance(v, type([])):
+ if v in ('-1', ['-1']):
+ # only match rows that have count(linkid)=0 in the
+ # corresponding multilink table)
+ where.append('id not in (select nodeid from %s)'%tn)
+ elif isinstance(v, type([])):
+ frum.append(tn)
s = ','.join([a for x in v])
where.append('id=%s.nodeid and %s.linkid in (%s)'%(tn,tn,s))
args = args + v
else:
- where.append('id=%s.nodeid and %s.linkid = %s'%(tn, tn, a))
+ frum.append(tn)
+ where.append('id=%s.nodeid and %s.linkid=%s'%(tn, tn, a))
args.append(v)
elif k == 'id':
if isinstance(v, type([])):
elif isinstance(propclass, Link):
if isinstance(v, type([])):
if '-1' in v:
+ v = v[:]
v.remove('-1')
xtra = ' or _%s is NULL'%k
else:
where.append('_%s in (%s)'%(k, s))
args = args + [date.Date(x).serialise() for x in v]
else:
- where.append('_%s=%s'%(k, a))
- args.append(date.Date(v).serialise())
+ try:
+ # Try to filter on range of dates
+ date_rng = Range(v, date.Date, offset=timezone)
+ if (date_rng.from_value):
+ where.append('_%s >= %s'%(k, a))
+ args.append(date_rng.from_value.serialise())
+ if (date_rng.to_value):
+ where.append('_%s <= %s'%(k, a))
+ args.append(date_rng.to_value.serialise())
+ except ValueError:
+ # If range creation fails - ignore that search parameter
+ pass
elif isinstance(propclass, Interval):
if isinstance(v, type([])):
s = ','.join([a for x in v])
where.append('_%s in (%s)'%(k, s))
args = args + [date.Interval(x).serialise() for x in v]
else:
- where.append('_%s=%s'%(k, a))
- args.append(date.Interval(v).serialise())
+ try:
+ # Try to filter on range of intervals
+ date_rng = Range(v, date.Interval)
+ if (date_rng.from_value):
+ where.append('_%s >= %s'%(k, a))
+ args.append(date_rng.from_value.serialise())
+ if (date_rng.to_value):
+ where.append('_%s <= %s'%(k, a))
+ args.append(date_rng.to_value.serialise())
+ except ValueError:
+ # If range creation fails - ignore that search parameter
+ pass
+ #where.append('_%s=%s'%(k, a))
+ #args.append(date.Interval(v).serialise())
else:
if isinstance(v, type([])):
s = ','.join([a for x in v])
where.append('_%s=%s'%(k, a))
args.append(v)
+ # don't match retired nodes
+ where.append('__retired__ <> 1')
+
# add results of full text search
if search_matches is not None:
v = search_matches.keys()
_marker = []
def get(self, nodeid, propname, default=_marker, cache=1):
- ''' trap the content propname and get it from the file
+ ''' Trap the content propname and get it from the file
+
+ 'cache' exists for backwards compatibility, and is not used.
'''
poss_msg = 'Possibly a access right configuration problem.'
if propname == 'content':
return 'ERROR reading file: %s%s\n%s\n%s'%(
self.classname, nodeid, poss_msg, strerror)
if default is not self._marker:
- return Class.get(self, nodeid, propname, default, cache=cache)
+ return Class.get(self, nodeid, propname, default)
else:
- return Class.get(self, nodeid, propname, cache=cache)
+ return Class.get(self, nodeid, propname)
def getprops(self, protected=1):
''' In addition to the actual properties on the node, these methods