index d264a581b8d0f0d4c4b83ca1c19c6695fcb51afd..b0eb498b0c0df99ae5fdefe13c49f18edca702d2 100644 (file)
-# $Id: rdbms_common.py,v 1.1 2002-09-18 05:07:47 richard Exp $
+# $Id: rdbms_common.py,v 1.15 2002-09-24 01:59:28 richard Exp $
# standard python modules
import sys, os, time, re, errno, weakref, copy
from roundup.indexer import Indexer
from sessions import Sessions
+# number of rows to keep in memory
+ROW_CACHE_SIZE = 100
+
class Database(FileStorage, hyperdb.Database, roundupdb.Database):
- # flag to set on retired entries
- RETIRED_FLAG = '__hyperdb_retired'
+ ''' Wrapper around an SQL database that presents a hyperdb interface.
+ - some functionality is specific to the actual SQL database, hence
+ the sql_* methods that are NotImplemented
+ - we keep a cache of the latest ROW_CACHE_SIZE row fetches.
+ '''
def __init__(self, config, journaltag=None):
''' Open the database and load the schema from it.
'''
# additional transaction support for external files and the like
self.transactions = []
+ # keep a cache of the N most recently retrieved rows of any kind
+ # (classname, nodeid) = row
+ self.cache = {}
+ self.cache_lru = []
+
# open a connection to the database, creating the "conn" attribute
self.open_connection()
+ def clearCache(self):
+ self.cache = {}
+ self.cache_lru = []
+
def open_connection(self):
''' Open a connection to the database, creating it if necessary
'''
raise NotImplemented
- def sql(self, cursor, sql, args=None):
+ def sql(self, sql, args=None):
''' Execute the sql with the optional args.
'''
if __debug__:
print >>hyperdb.DEBUG, (self, sql, args)
if args:
- cursor.execute(sql, args)
+ self.cursor.execute(sql, args)
else:
- cursor.execute(sql)
+ self.cursor.execute(sql)
- def sql_fetchone(self, cursor):
+ def sql_fetchone(self):
''' Fetch a single row. If there's nothing to fetch, return None.
'''
raise NotImplemented
- def save_dbschema(self, cursor, schema):
+ def sql_stringquote(self, value):
+ ''' Quote the string so it's safe to put in the 'sql quotes'
+ '''
+ return re.sub("'", "''", str(value))
+
+ def save_dbschema(self, schema):
''' Save the schema definition that the database currently implements
'''
raise NotImplemented
- def load_dbschema(self, cursor):
+ def load_dbschema(self):
''' Load the schema definition that the database currently implements
'''
raise NotImplemented
attribute actually matches the schema in the database.
'''
# now detect changes in the schema
+ save = 0
for classname, spec in self.classes.items():
if self.database_schema.has_key(classname):
dbspec = self.database_schema[classname]
- self.update_class(spec, dbspec)
- self.database_schema[classname] = spec.schema()
+ if self.update_class(spec, dbspec):
+ self.database_schema[classname] = spec.schema()
+ save = 1
else:
self.create_class(spec)
self.database_schema[classname] = spec.schema()
+ save = 1
for classname in self.database_schema.keys():
if not self.classes.has_key(classname):
self.drop_class(classname)
# update the database version of the schema
- cursor = self.conn.cursor()
- self.sql(cursor, 'delete from schema')
- self.save_dbschema(cursor, self.database_schema)
+ if save:
+ self.sql('delete from schema')
+ self.save_dbschema(self.database_schema)
# reindex the db if necessary
if self.indexer.should_reindex():
# commit
self.conn.commit()
+ # figure the "curuserid"
+ if self.journaltag is None:
+ self.curuserid = None
+ elif self.journaltag == 'admin':
+ # admin user may not exist, but always has ID 1
+ self.curuserid = '1'
+ else:
+ self.curuserid = self.user.lookup(self.journaltag)
+
def reindex(self):
for klass in self.classes.values():
for nodeid in klass.list():
"properties" is a list of (name, prop) where prop may be an
instance of a hyperdb "type" _or_ a string repr of that type.
'''
- cols = []
+ cols = ['_activity', '_creator', '_creation']
mls = []
# add the multilinks separately
for col, prop in properties:
'''
spec_schema = spec.schema()
if spec_schema == dbspec:
- return
+ # no save needed for this one
+ return 0
if __debug__:
print >>hyperdb.DEBUG, 'update_class FIRING'
dbspec_propnames.append(propname)
dbspec_props[propname] = prop
- # we're going to need one of these
- cursor = self.conn.cursor()
-
# now compare
for propname in spec_propnames:
prop = spec_props[propname]
# add the property
if isinstance(prop, Multilink):
# all we have to do here is create a new table, easy!
- self.create_multilink_table(cursor, spec, propname)
+ self.create_multilink_table(spec, propname)
continue
# no ALTER TABLE, so we:
sql = 'select %s,%s from _%s'%(','.join(oldcols), self.arg, cn)
if __debug__:
print >>hyperdb.DEBUG, 'update_class', (self, sql, None)
- cursor.execute(sql, (None,))
- olddata = cursor.fetchall()
+ self.cursor.execute(sql, (None,))
+ olddata = self.cursor.fetchall()
# 2. drop the old table
- cursor.execute('drop table _%s'%cn)
+ self.cursor.execute('drop table _%s'%cn)
# 3. create the new table
- cols, mls = self.create_class_table(cursor, spec)
+ cols, mls = self.create_class_table(spec)
# ensure the new column is last
cols.remove('_'+propname)
assert oldcols == cols, "Column lists don't match!"
# do the insert
for row in olddata:
- self.sql(cursor, sql, tuple(row))
+ self.sql(sql, tuple(row))
else:
# modify the property
sql = 'drop table %s_%s'%(spec.classname, prop)
if __debug__:
print >>hyperdb.DEBUG, 'update_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
else:
# no ALTER TABLE, so we:
# 1. pull out the data, excluding the removed column
oldcols.remove('_'+propname)
cn = spec.classname
sql = 'select %s from _%s'%(','.join(oldcols), cn)
- cursor.execute(sql, (None,))
+ self.cursor.execute(sql, (None,))
olddata = sql.fetchall()
# 2. drop the old table
- cursor.execute('drop table _%s'%cn)
+ self.cursor.execute('drop table _%s'%cn)
# 3. create the new table
- cols, mls = self.create_class_table(self, cursor, spec)
+            cols, mls = self.create_class_table(spec)
assert oldcols != cols, "Column lists don't match!"
# 4. populate with the data from step one
qs = ','.join([self.arg for x in cols])
sql = 'insert into _%s values (%s)'%(cn, s)
- cursor.execute(sql, olddata)
+ self.cursor.execute(sql, olddata)
+ return 1
- def create_class_table(self, cursor, spec):
+ def create_class_table(self, spec):
''' create the class table for the given spec
'''
cols, mls = self.determine_columns(spec.properties.items())
sql = 'create table _%s (%s)'%(spec.classname, scols)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
return cols, mls
- def create_journal_table(self, cursor, spec):
+ def create_journal_table(self, spec):
''' create the journal table for a class given the spec and
already-determined cols
'''
sql = 'create table %s__journal (%s)'%(spec.classname, cols)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
- def create_multilink_table(self, cursor, spec, ml):
+ def create_multilink_table(self, spec, ml):
''' Create a multilink table for the "ml" property of the class
given by the spec
'''
spec.classname, ml)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
def create_class(self, spec):
''' Create a database table according to the given spec.
'''
- cursor = self.conn.cursor()
- cols, mls = self.create_class_table(cursor, spec)
- self.create_journal_table(cursor, spec)
+ cols, mls = self.create_class_table(spec)
+ self.create_journal_table(spec)
# now create the multilink tables
for ml in mls:
- self.create_multilink_table(cursor, spec, ml)
+ self.create_multilink_table(spec, ml)
# ID counter
sql = 'insert into ids (name, num) values (%s,%s)'%(self.arg, self.arg)
vals = (spec.classname, 1)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
def drop_class(self, spec):
''' Drop the given table from the database.
for col, prop in spec.properties.items():
if isinstance(prop, Multilink):
mls.append(col)
- cursor = self.conn.cursor()
sql = 'drop table _%s'%spec.classname
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
sql = 'drop table %s__journal'%spec.classname
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
for ml in mls:
sql = 'drop table %s_%s'%(spec.classname, ml)
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
#
# Classes
'''
if __debug__:
print >>hyperdb.DEBUG, 'clear', (self,)
- cursor = self.conn.cursor()
for cn in self.classes.keys():
sql = 'delete from _%s'%cn
if __debug__:
print >>hyperdb.DEBUG, 'clear', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
#
# Node IDs
''' Generate a new id for the given class
'''
# get the next ID
- cursor = self.conn.cursor()
sql = 'select num from ids where name=%s'%self.arg
if __debug__:
print >>hyperdb.DEBUG, 'newid', (self, sql, classname)
- cursor.execute(sql, (classname, ))
- newid = cursor.fetchone()[0]
+ self.cursor.execute(sql, (classname, ))
+ newid = self.cursor.fetchone()[0]
# update the counter
sql = 'update ids set num=%s where name=%s'%(self.arg, self.arg)
vals = (int(newid)+1, classname)
if __debug__:
print >>hyperdb.DEBUG, 'newid', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
# return as string
return str(newid)
def setid(self, classname, setid):
''' Set the id counter: used during import of database
'''
- cursor = self.conn.cursor()
sql = 'update ids set num=%s where name=%s'%(self.arg, self.arg)
- vals = (setid, spec.classname)
+ vals = (setid, classname)
if __debug__:
print >>hyperdb.DEBUG, 'setid', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
#
# Nodes
cl = self.classes[classname]
cols, mls = self.determine_columns(cl.properties.items())
+ # we'll be supplied these props if we're doing an import
+ if not node.has_key('creator'):
+ # add in the "calculated" properties (dupe so we don't affect
+ # calling code's node assumptions)
+ node = node.copy()
+ node['creation'] = node['activity'] = date.Date()
+ node['creator'] = self.curuserid
+
# default the non-multilink columns
for col, prop in cl.properties.items():
if not isinstance(col, Multilink):
if not node.has_key(col):
node[col] = None
+ # clear this node out of the cache if it's in there
+ key = (classname, nodeid)
+ if self.cache.has_key(key):
+ del self.cache[key]
+ self.cache_lru.remove(key)
+
+ # make the node data safe for the DB
node = self.serialise(classname, node)
# make sure the ordering is correct for column name -> column value
cols = ','.join(cols) + ',id,__retired__'
# perform the inserts
- cursor = self.conn.cursor()
sql = 'insert into _%s (%s) values (%s)'%(classname, cols, s)
if __debug__:
print >>hyperdb.DEBUG, 'addnode', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
# insert the multilink rows
for col in mls:
for entry in node[col]:
sql = 'insert into %s (linkid, nodeid) values (%s,%s)'%(t,
self.arg, self.arg)
- self.sql(cursor, sql, (entry, nodeid))
+ self.sql(sql, (entry, nodeid))
# make sure we do the commit-time extra stuff for this node
self.transactions.append((self.doSaveNode, (classname, nodeid, node)))
- def setnode(self, classname, nodeid, node, multilink_changes):
+ def setnode(self, classname, nodeid, values, multilink_changes):
''' Change the specified node.
'''
if __debug__:
- print >>hyperdb.DEBUG, 'setnode', (self, classname, nodeid, node)
- node = self.serialise(classname, node)
+ print >>hyperdb.DEBUG, 'setnode', (self, classname, nodeid, values)
+
+ # clear this node out of the cache if it's in there
+ key = (classname, nodeid)
+ if self.cache.has_key(key):
+ del self.cache[key]
+ self.cache_lru.remove(key)
+
+ # add the special props
+ values = values.copy()
+ values['activity'] = date.Date()
+
+ # make db-friendly
+ values = self.serialise(classname, values)
cl = self.classes[classname]
cols = []
mls = []
# add the multilinks separately
- for col in node.keys():
- prop = cl.properties[col]
+ props = cl.getprops()
+ for col in values.keys():
+ prop = props[col]
if isinstance(prop, Multilink):
mls.append(col)
else:
cols.append('_'+col)
cols.sort()
- # make sure the ordering is correct for column name -> column value
- vals = tuple([node[col[1:]] for col in cols])
- s = ','.join(['%s=%s'%(x, self.arg) for x in cols])
- cols = ','.join(cols)
+ # if there's any updates to regular columns, do them
+ if cols:
+ # make sure the ordering is correct for column name -> column value
+ sqlvals = tuple([values[col[1:]] for col in cols]) + (nodeid,)
+ s = ','.join(['%s=%s'%(x, self.arg) for x in cols])
+ cols = ','.join(cols)
- # perform the update
- cursor = self.conn.cursor()
- sql = 'update _%s set %s'%(classname, s)
- if __debug__:
- print >>hyperdb.DEBUG, 'setnode', (self, sql, vals)
- cursor.execute(sql, vals)
+ # perform the update
+ sql = 'update _%s set %s where id=%s'%(classname, s, self.arg)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'setnode', (self, sql, sqlvals)
+ self.cursor.execute(sql, sqlvals)
# now the fun bit, updating the multilinks ;)
for col, (add, remove) in multilink_changes.items():
sql = 'insert into %s (nodeid, linkid) values (%s,%s)'%(tn,
self.arg, self.arg)
for addid in add:
- self.sql(cursor, sql, (nodeid, addid))
+ self.sql(sql, (nodeid, addid))
if remove:
sql = 'delete from %s where nodeid=%s and linkid=%s'%(tn,
self.arg, self.arg)
for removeid in remove:
- self.sql(cursor, sql, (nodeid, removeid))
+ self.sql(sql, (nodeid, removeid))
# make sure we do the commit-time extra stuff for this node
- self.transactions.append((self.doSaveNode, (classname, nodeid, node)))
+ self.transactions.append((self.doSaveNode, (classname, nodeid, values)))
def getnode(self, classname, nodeid):
''' Get a node from the database.
'''
if __debug__:
print >>hyperdb.DEBUG, 'getnode', (self, classname, nodeid)
+
+ # see if we have this node cached
+ key = (classname, nodeid)
+ if self.cache.has_key(key):
+ # push us back to the top of the LRU
+ self.cache_lru.remove(key)
+ self.cache_lru.insert(0, key)
+ # return the cached information
+ return self.cache[key]
+
# figure the columns we're fetching
cl = self.classes[classname]
cols, mls = self.determine_columns(cl.properties.items())
scols = ','.join(cols)
# perform the basic property fetch
- cursor = self.conn.cursor()
sql = 'select %s from _%s where id=%s'%(scols, classname, self.arg)
- self.sql(cursor, sql, (nodeid,))
+ self.sql(sql, (nodeid,))
- values = self.sql_fetchone(cursor)
+ values = self.sql_fetchone()
if values is None:
raise IndexError, 'no such %s node %s'%(classname, nodeid)
# get the link ids
sql = 'select linkid from %s_%s where nodeid=%s'%(classname, col,
self.arg)
- cursor.execute(sql, (nodeid,))
+ self.cursor.execute(sql, (nodeid,))
# extract the first column from the result
- node[col] = [x[0] for x in cursor.fetchall()]
+ node[col] = [x[0] for x in self.cursor.fetchall()]
- return self.unserialise(classname, node)
+ # un-dbificate the node data
+ node = self.unserialise(classname, node)
+
+ # save off in the cache
+ key = (classname, nodeid)
+ self.cache[key] = node
+ # update the LRU
+ self.cache_lru.insert(0, key)
+ if len(self.cache_lru) > ROW_CACHE_SIZE:
+ del self.cache[self.cache_lru.pop()]
+
+ return node
def destroynode(self, classname, nodeid):
'''Remove a node from the database. Called exclusively by the
if not self.hasnode(classname, nodeid):
raise IndexError, '%s has no node %s'%(classname, nodeid)
+ # see if we have this node cached
+ if self.cache.has_key((classname, nodeid)):
+ del self.cache[(classname, nodeid)]
+
# see if there's any obvious commit actions that we should get rid of
for entry in self.transactions[:]:
if entry[1][:2] == (classname, nodeid):
self.transactions.remove(entry)
# now do the SQL
- cursor = self.conn.cursor()
sql = 'delete from _%s where id=%s'%(classname, self.arg)
- self.sql(cursor, sql, (nodeid,))
+ self.sql(sql, (nodeid,))
# remove from multilnks
cl = self.getclass(classname)
for col in mls:
# get the link ids
sql = 'delete from %s_%s where nodeid=%s'%(classname, col, self.arg)
- cursor.execute(sql, (nodeid,))
+ self.cursor.execute(sql, (nodeid,))
# remove journal entries
sql = 'delete from %s__journal where nodeid=%s'%(classname, self.arg)
- self.sql(cursor, sql, (nodeid,))
+ self.sql(sql, (nodeid,))
def serialise(self, classname, node):
'''Copy the node contents, converting non-marshallable data into
def hasnode(self, classname, nodeid):
''' Determine if the database has a given node.
'''
- cursor = self.conn.cursor()
sql = 'select count(*) from _%s where id=%s'%(classname, self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'hasnode', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
- return int(cursor.fetchone()[0])
+ self.cursor.execute(sql, (nodeid,))
+ return int(self.cursor.fetchone()[0])
def countnodes(self, classname):
''' Count the number of nodes that exist for a particular Class.
'''
- cursor = self.conn.cursor()
sql = 'select count(*) from _%s'%classname
if __debug__:
print >>hyperdb.DEBUG, 'countnodes', (self, sql)
- cursor.execute(sql)
- return cursor.fetchone()[0]
+ self.cursor.execute(sql)
+ return self.cursor.fetchone()[0]
def getnodeids(self, classname, retired=0):
''' Retrieve all the ids of the nodes for a particular Class.
Set retired=None to get all nodes. Otherwise it'll get all the
retired or non-retired nodes, depending on the flag.
'''
- cursor = self.conn.cursor()
# flip the sense of the flag if we don't want all of them
if retired is not None:
retired = not retired
sql = 'select id from _%s where __retired__ <> %s'%(classname, self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'getnodeids', (self, sql, retired)
- cursor.execute(sql, (retired,))
- return [x[0] for x in cursor.fetchall()]
+ self.cursor.execute(sql, (retired,))
+ return [x[0] for x in self.cursor.fetchall()]
def addjournal(self, classname, nodeid, action, params, creator=None,
creation=None):
if creator:
journaltag = creator
else:
- journaltag = self.journaltag
+ journaltag = self.curuserid
if creation:
journaldate = creation.serialise()
else:
print >>hyperdb.DEBUG, 'addjournal', (nodeid, journaldate,
journaltag, action, params)
- cursor = self.conn.cursor()
- self.save_journal(cursor, classname, cols, nodeid, journaldate,
+ self.save_journal(classname, cols, nodeid, journaldate,
journaltag, action, params)
- def save_journal(self, cursor, classname, cols, nodeid, journaldate,
+ def save_journal(self, classname, cols, nodeid, journaldate,
journaltag, action, params):
''' Save the journal entry to the database
'''
if not self.hasnode(classname, nodeid):
raise IndexError, '%s has no node %s'%(classname, nodeid)
- cursor = self.conn.cursor()
cols = ','.join('nodeid date tag action params'.split())
- return self.load_journal(cursor, classname, cols, nodeid)
+ return self.load_journal(classname, cols, nodeid)
- def load_journal(self, cursor, classname, cols, nodeid):
+ def load_journal(self, classname, cols, nodeid):
''' Load the journal from the database
'''
raise NotImplemented
date_stamp = pack_before.serialise()
# do the delete
- cursor = self.conn.cursor()
for classname in self.classes.keys():
sql = "delete from %s__journal where date<%s and "\
"action<>'create'"%(classname, self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'pack', (self, sql, date_stamp)
- cursor.execute(sql, (date_stamp,))
+ self.cursor.execute(sql, (date_stamp,))
def sql_commit(self):
''' Actually commit to the database.
value = pwd
d[propname] = value
+ # add the node and journal
+ self.db.addnode(self.classname, newid, d)
+
# extract the extraneous journalling gumpf and nuke it
if d.has_key('creator'):
creator = d['creator']
del d['creator']
+ else:
+ creator = None
if d.has_key('creation'):
creation = d['creation']
del d['creation']
+ else:
+ creation = None
if d.has_key('activity'):
del d['activity']
-
- # add the node and journal
- self.db.addnode(self.classname, newid, d)
self.db.addjournal(self.classname, newid, 'create', d, creator,
creation)
return newid
if propname == 'id':
return nodeid
+ # get the node's dict
+ d = self.db.getnode(self.classname, nodeid)
+
if propname == 'creation':
- if not self.do_journal:
- raise ValueError, 'Journalling is disabled for this class'
- journal = self.db.getjournal(self.classname, nodeid)
- if journal:
- return self.db.getjournal(self.classname, nodeid)[0][1]
+ if d.has_key('creation'):
+ return d['creation']
else:
- # on the strange chance that there's no journal
return date.Date()
if propname == 'activity':
- if not self.do_journal:
- raise ValueError, 'Journalling is disabled for this class'
- journal = self.db.getjournal(self.classname, nodeid)
- if journal:
- return self.db.getjournal(self.classname, nodeid)[-1][1]
+ if d.has_key('activity'):
+ return d['activity']
else:
- # on the strange chance that there's no journal
return date.Date()
if propname == 'creator':
- if not self.do_journal:
- raise ValueError, 'Journalling is disabled for this class'
- journal = self.db.getjournal(self.classname, nodeid)
- if journal:
- name = self.db.getjournal(self.classname, nodeid)[0][2]
+ if d.has_key('creator'):
+ return d['creator']
else:
- return None
- try:
- return self.db.user.lookup(name)
- except KeyError:
- # the journaltag user doesn't exist any more
- return None
+ return self.db.curuserid
# get the property (raises KeyErorr if invalid)
prop = self.properties[propname]
- # get the node's dict
- d = self.db.getnode(self.classname, nodeid) #, cache=cache)
-
if not d.has_key(propname):
if default is self._marker:
if isinstance(prop, Multilink):
# this will raise the KeyError if the property isn't valid
# ... we don't use getprops() here because we only care about
# the writeable properties.
- prop = self.properties[propname]
+ try:
+ prop = self.properties[propname]
+ except KeyError:
+ raise KeyError, '"%s" has no property named "%s"'%(
+ self.classname, propname)
# if the value's the same as the existing value, no sense in
# doing anything
except ValueError:
raise TypeError, 'new property "%s" not boolean'%propname
- node[propname] = value
-
# nothing to do?
if not propvalues:
return propvalues
# do the set, and journal it
- self.db.setnode(self.classname, nodeid, node, multilink_changes)
+ self.db.setnode(self.classname, nodeid, propvalues, multilink_changes)
if self.do_journal:
propvalues.update(journalvalues)
if self.db.journaltag is None:
raise DatabaseError, 'Database open read-only'
- cursor = self.db.conn.cursor()
sql = 'update _%s set __retired__=1 where id=%s'%(self.classname,
self.db.arg)
if __debug__:
print >>hyperdb.DEBUG, 'retire', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
+ self.db.cursor.execute(sql, (nodeid,))
def is_retired(self, nodeid):
'''Return true if the node is rerired
'''
- cursor = self.db.conn.cursor()
sql = 'select __retired__ from _%s where id=%s'%(self.classname,
self.db.arg)
if __debug__:
print >>hyperdb.DEBUG, 'is_retired', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
- return int(cursor.fetchone()[0])
+ self.db.cursor.execute(sql, (nodeid,))
+ return int(self.db.sql_fetchone()[0])
def destroy(self, nodeid):
'''Destroy a node.
if not self.key:
raise TypeError, 'No key property set for class %s'%self.classname
- cursor = self.db.conn.cursor()
- sql = 'select id from _%s where _%s=%s'%(self.classname, self.key,
- self.db.arg)
- if __debug__:
- print >>hyperdb.DEBUG, 'lookup', (self, sql, keyvalue)
- cursor.execute(sql, (keyvalue,))
+ sql = 'select id,__retired__ from _%s where _%s=%s'%(self.classname,
+ self.key, self.db.arg)
+ self.db.sql(sql, (keyvalue,))
- # see if there was a result
- l = cursor.fetchall()
- if not l:
- raise KeyError, keyvalue
+ # see if there was a result that's not retired
+ l = self.db.cursor.fetchall()
+ if not l or int(l[0][1]):
+ raise KeyError, 'No key (%s) value "%s" for "%s"'%(self.key,
+ keyvalue, self.classname)
# return the id
return l[0][0]
tables.append('select nodeid from %s_%s where linkid in (%s)'%(
self.classname, prop, ','.join([a for x in values.keys()])))
sql = '\nintersect\n'.join(tables)
+ self.db.sql(sql, allvalues)
+ l = [x[0] for x in self.db.sql_fetchall()]
if __debug__:
- print >>hyperdb.DEBUG, 'find', (self, sql, allvalues)
- cursor = self.db.conn.cursor()
- cursor.execute(sql, allvalues)
- try:
- l = [x[0] for x in cursor.fetchall()]
- except gadfly.database.error, message:
- if message == 'no more results':
- l = []
- raise
+ print >>hyperdb.DEBUG, 'find ... ', l
+ return l
+
+ def stringFind(self, **requirements):
+ '''Locate a particular node by matching a set of its String
+ properties in a caseless search.
+
+ If the property is not a String property, a TypeError is raised.
+
+ The return is a list of the id of all nodes that match.
+ '''
+ where = []
+ args = []
+ for propname in requirements.keys():
+ prop = self.properties[propname]
+            if not isinstance(prop, String):
+ raise TypeError, "'%s' not a String property"%propname
+ where.append(propname)
+ args.append(requirements[propname].lower())
+
+ # generate the where clause
+ s = ' and '.join(['_%s=%s'%(col, self.db.arg) for col in where])
+ sql = 'select id from _%s where %s'%(self.classname, s)
+ self.db.sql(sql, tuple(args))
+ l = [x[0] for x in self.db.sql_fetchall()]
if __debug__:
print >>hyperdb.DEBUG, 'find ... ', l
return l
"sort" and "group" are (dir, prop) where dir is '+', '-' or None
and prop is a prop name or None
"search_matches" is {nodeid: marker}
+
+ The filter must match all properties specificed - but if the
+ property value to match is a list, any one of the values in the
+ list may match for that property to match.
'''
cn = self.classname
frum = ['_'+cn]
where = []
args = []
+ a = self.db.arg
for k, v in filterspec.items():
propclass = props[k]
+ # now do other where clause stuff
if isinstance(propclass, Multilink):
tn = '%s_%s'%(cn, k)
frum.append(tn)
if isinstance(v, type([])):
- s = ','.join([self.arg for x in v])
+ s = ','.join([a for x in v])
where.append('id=%s.nodeid and %s.linkid in (%s)'%(tn,tn,s))
args = args + v
else:
- where.append('id=%s.nodeid and %s.linkid = %s'%(tn, tn,
- self.arg))
+ where.append('id=%s.nodeid and %s.linkid = %s'%(tn, tn, a))
args.append(v)
+ elif isinstance(propclass, String):
+ if not isinstance(v, type([])):
+ v = [v]
+
+ # Quote the bits in the string that need it and then embed
+ # in a "substring" search. Note - need to quote the '%' so
+ # they make it through the python layer happily
+ v = ['%%'+self.db.sql_stringquote(s)+'%%' for s in v]
+
+ # now add to the where clause
+ where.append(' or '.join(["_%s LIKE '%s'"%(k, s) for s in v]))
+ # note: args are embedded in the query string now
+ elif isinstance(propclass, Link):
+ if isinstance(v, type([])):
+ if '-1' in v:
+ v.remove('-1')
+ xtra = ' or _%s is NULL'%k
+ else:
+ xtra = ''
+ s = ','.join([a for x in v])
+ where.append('(_%s in (%s)%s)'%(k, s, xtra))
+ args = args + v
+ else:
+ if v == '-1':
+ v = None
+ where.append('_%s is NULL'%k)
+ else:
+ where.append('_%s=%s'%(k, a))
+ args.append(v)
else:
if isinstance(v, type([])):
- s = ','.join([self.arg for x in v])
+ s = ','.join([a for x in v])
where.append('_%s in (%s)'%(k, s))
args = args + v
else:
- where.append('_%s=%s'%(k, self.arg))
+ where.append('_%s=%s'%(k, a))
args.append(v)
# add results of full text search
if search_matches is not None:
v = search_matches.keys()
- s = ','.join([self.arg for x in v])
+ s = ','.join([a for x in v])
where.append('id in (%s)'%s)
args = args + v
- # figure the order by clause
+ # "grouping" is just the first-order sorting in the SQL fetch
+ # can modify it...)
orderby = []
ordercols = []
- if sort[0] is not None and sort[1] is not None:
- if sort[0] != '-':
- orderby.append('_'+sort[1])
- ordercols.append(sort[1])
- else:
- orderby.append('_'+sort[1]+' desc')
- ordercols.append(sort[1])
-
- # figure the group by clause
- groupby = []
- groupcols = []
if group[0] is not None and group[1] is not None:
if group[0] != '-':
- groupby.append('_'+group[1])
- groupcols.append(group[1])
+ orderby.append('_'+group[1])
+ ordercols.append('_'+group[1])
else:
- groupby.append('_'+group[1]+' desc')
- groupcols.append(group[1])
+ orderby.append('_'+group[1]+' desc')
+ ordercols.append('_'+group[1])
+
+ # now add in the sorting
+ group = ''
+ if sort[0] is not None and sort[1] is not None:
+ direction, colname = sort
+ if direction != '-':
+                if colname == 'id':
+                    orderby.append(colname)
+                    ordercols.append(colname)
+                else:
+                    orderby.append('_'+colname)
+                    ordercols.append('_'+colname)
+ else:
+ if colname == 'id':
+ orderby.append(colname+' desc')
+ ordercols.append(colname)
+ else:
+ orderby.append('_'+colname+' desc')
+ ordercols.append('_'+colname)
# construct the SQL
frum = ','.join(frum)
- where = ' and '.join(where)
+ if where:
+ where = ' where ' + (' and '.join(where))
+ else:
+ where = ''
cols = ['id']
if orderby:
cols = cols + ordercols
order = ' order by %s'%(','.join(orderby))
else:
order = ''
- if groupby:
- cols = cols + groupcols
- group = ' group by %s'%(','.join(groupby))
- else:
- group = ''
cols = ','.join(cols)
- sql = 'select %s from %s where %s%s%s'%(cols, frum, where, order,
- group)
+ sql = 'select %s from %s %s%s%s'%(cols, frum, where, group, order)
args = tuple(args)
if __debug__:
- print >>hyperdb.DEBUG, 'find', (self, sql, args)
- cursor = self.db.conn.cursor()
- cursor.execute(sql, args)
+ print >>hyperdb.DEBUG, 'filter', (self, sql, args)
+ self.db.cursor.execute(sql, args)
+ l = self.db.cursor.fetchall()
+
+ # return the IDs (the first column)
+ # XXX The filter(None, l) bit is sqlite-specific... if there's _NO_
+ # XXX matches to a fetch, it returns NULL instead of nothing!?!
+ return filter(None, [row[0] for row in l])
def count(self):
'''Get the number of nodes in this class.
d['id'] = String()
d['creation'] = hyperdb.Date()
d['activity'] = hyperdb.Date()
- d['creator'] = hyperdb.Link("user")
+ d['creator'] = hyperdb.Link('user')
return d
def addprop(self, **properties):