index 195c8d06f5a35d5322c4b59fc62af89468bae7d9..979382f50533736619b38de15c4373adf8de69ba 100644 (file)
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-#$Id: back_anydbm.py,v 1.45 2002-07-14 04:03:14 richard Exp $
+#$Id: back_anydbm.py,v 1.57 2002-07-31 23:57:36 richard Exp $
'''
This module defines a backend that saves the hyperdatabase in a database
chosen by anydbm. It is guaranteed to always be available in python
'''
This module defines a backend that saves the hyperdatabase in a database
chosen by anydbm. It is guaranteed to always be available in python
'''
import whichdb, anydbm, os, marshal, re, weakref, string, copy
'''
import whichdb, anydbm, os, marshal, re, weakref, string, copy
-from roundup import hyperdb, date, password, roundupdb
+from roundup import hyperdb, date, password, roundupdb, security
from blobfiles import FileStorage
from blobfiles import FileStorage
+from sessions import Sessions
from roundup.indexer import Indexer
from locking import acquire_lock, release_lock
from roundup.hyperdb import String, Password, Date, Interval, Link, \
from roundup.indexer import Indexer
from locking import acquire_lock, release_lock
from roundup.hyperdb import String, Password, Date, Interval, Link, \
- Multilink, DatabaseError
+ Multilink, DatabaseError, Boolean, Number
#
# Now the database
#
# Now the database
self.cache = {} # cache of nodes loaded or created
self.dirtynodes = {} # keep track of the dirty nodes by class
self.newnodes = {} # keep track of the new nodes by class
self.cache = {} # cache of nodes loaded or created
self.dirtynodes = {} # keep track of the dirty nodes by class
self.newnodes = {} # keep track of the new nodes by class
+ self.destroyednodes = {}# keep track of the destroyed nodes by class
self.transactions = []
self.indexer = Indexer(self.dir)
self.transactions = []
self.indexer = Indexer(self.dir)
+ self.sessions = Sessions(self.config)
+ self.security = security.Security(self)
# ensure files are group readable and writable
os.umask(0002)
# ensure files are group readable and writable
os.umask(0002)
'''
if __debug__:
print >>hyperdb.DEBUG, 'getclassdb', (self, classname, mode)
'''
if __debug__:
print >>hyperdb.DEBUG, 'getclassdb', (self, classname, mode)
- return self._opendb('nodes.%s'%classname, mode)
+ return self.opendb('nodes.%s'%classname, mode)
- def _opendb(self, name, mode):
- '''Low-level database opener that gets around anydbm/dbm
- eccentricities.
+ def determine_db_type(self, path):
+ ''' determine which DB wrote the class file
'''
'''
- if __debug__:
- print >>hyperdb.DEBUG, '_opendb', (self, name, mode)
-
- # determine which DB wrote the class file
db_type = ''
db_type = ''
- path = os.path.join(os.getcwd(), self.dir, name)
if os.path.exists(path):
db_type = whichdb.whichdb(path)
if not db_type:
if os.path.exists(path):
db_type = whichdb.whichdb(path)
if not db_type:
# if the path ends in '.db', it's a dbm database, whether
# anydbm says it's dbhash or not!
db_type = 'dbm'
# if the path ends in '.db', it's a dbm database, whether
# anydbm says it's dbhash or not!
db_type = 'dbm'
+ return db_type
+
+ def opendb(self, name, mode):
+ '''Low-level database opener that gets around anydbm/dbm
+ eccentricities.
+ '''
+ if __debug__:
+ print >>hyperdb.DEBUG, 'opendb', (self, name, mode)
+
+ # figure the class db type
+ path = os.path.join(os.getcwd(), self.dir, name)
+ db_type = self.determine_db_type(path)
# new database? let anydbm pick the best dbm
if not db_type:
if __debug__:
# new database? let anydbm pick the best dbm
if not db_type:
if __debug__:
- print >>hyperdb.DEBUG, "_opendb anydbm.open(%r, 'n')"%path
+ print >>hyperdb.DEBUG, "opendb anydbm.open(%r, 'n')"%path
return anydbm.open(path, 'n')
# open the database with the correct module
return anydbm.open(path, 'n')
# open the database with the correct module
"Couldn't open database - the required module '%s'"\
" is not available"%db_type
if __debug__:
"Couldn't open database - the required module '%s'"\
" is not available"%db_type
if __debug__:
- print >>hyperdb.DEBUG, "_opendb %r.open(%r, %r)"%(db_type, path,
+ print >>hyperdb.DEBUG, "opendb %r.open(%r, %r)"%(db_type, path,
mode)
return dbm.open(path, mode)
mode)
return dbm.open(path, mode)
- def _lockdb(self, name):
+ def lockdb(self, name):
''' Lock a database file
'''
path = os.path.join(os.getcwd(), self.dir, '%s.lock'%name)
''' Lock a database file
'''
path = os.path.join(os.getcwd(), self.dir, '%s.lock'%name)
''' Generate a new id for the given class
'''
# open the ids DB - create if if doesn't exist
''' Generate a new id for the given class
'''
# open the ids DB - create if if doesn't exist
- lock = self._lockdb('_ids')
- db = self._opendb('_ids', 'c')
+ lock = self.lockdb('_ids')
+ db = self.opendb('_ids', 'c')
if db.has_key(classname):
newid = db[classname] = str(int(db[classname]) + 1)
else:
if db.has_key(classname):
newid = db[classname] = str(int(db[classname]) + 1)
else:
'''
if __debug__:
print >>hyperdb.DEBUG, 'savenode', (self, classname, nodeid, node)
'''
if __debug__:
print >>hyperdb.DEBUG, 'savenode', (self, classname, nodeid, node)
- self.transactions.append((self._doSaveNode, (classname, nodeid, node)))
+ self.transactions.append((self.doSaveNode, (classname, nodeid, node)))
def getnode(self, classname, nodeid, db=None, cache=1):
''' get a node from the database
def getnode(self, classname, nodeid, db=None, cache=1):
''' get a node from the database
if not db.has_key(nodeid):
raise IndexError, "no such %s %s"%(classname, nodeid)
if not db.has_key(nodeid):
raise IndexError, "no such %s %s"%(classname, nodeid)
+ # check the uncommitted, destroyed nodes
+ if (self.destroyednodes.has_key(classname) and
+ self.destroyednodes[classname].has_key(nodeid)):
+ raise IndexError, "no such %s %s"%(classname, nodeid)
+
# decode
res = marshal.loads(db[nodeid])
# decode
res = marshal.loads(db[nodeid])
return res
return res
+ def destroynode(self, classname, nodeid):
+ '''Remove a node from the database. Called exclusively by the
+ destroy() method on Class.
+ '''
+ if __debug__:
+ print >>hyperdb.DEBUG, 'destroynode', (self, classname, nodeid)
+
+ # remove from cache and newnodes if it's there
+ if (self.cache.has_key(classname) and
+ self.cache[classname].has_key(nodeid)):
+ del self.cache[classname][nodeid]
+ if (self.newnodes.has_key(classname) and
+ self.newnodes[classname].has_key(nodeid)):
+ del self.newnodes[classname][nodeid]
+
+ # see if there's any obvious commit actions that we should get rid of
+ for entry in self.transactions[:]:
+ if entry[1][:2] == (classname, nodeid):
+ self.transactions.remove(entry)
+
+ # add to the destroyednodes map
+ self.destroyednodes.setdefault(classname, {})[nodeid] = 1
+
+ # add the destroy commit action
+ self.transactions.append((self.doDestroyNode, (classname, nodeid)))
+
def serialise(self, classname, node):
'''Copy the node contents, converting non-marshallable data into
marshallable data.
def serialise(self, classname, node):
'''Copy the node contents, converting non-marshallable data into
marshallable data.
def countnodes(self, classname, db=None):
if __debug__:
print >>hyperdb.DEBUG, 'countnodes', (self, classname, db)
def countnodes(self, classname, db=None):
if __debug__:
print >>hyperdb.DEBUG, 'countnodes', (self, classname, db)
- # include the new nodes not saved to the DB yet
- count = len(self.newnodes.get(classname, {}))
+
+ count = 0
+
+ # include the uncommitted nodes
+ if self.newnodes.has_key(classname):
+ count += len(self.newnodes[classname])
+ if self.destroyednodes.has_key(classname):
+ count -= len(self.destroyednodes[classname])
# and count those in the DB
if db is None:
# and count those in the DB
if db is None:
def getnodeids(self, classname, db=None):
if __debug__:
print >>hyperdb.DEBUG, 'getnodeids', (self, classname, db)
def getnodeids(self, classname, db=None):
if __debug__:
print >>hyperdb.DEBUG, 'getnodeids', (self, classname, db)
+
+ res = []
+
# start off with the new nodes
# start off with the new nodes
- res = self.newnodes.get(classname, {}).keys()
+ if self.newnodes.has_key(classname):
+ res += self.newnodes[classname].keys()
if db is None:
db = self.getclassdb(classname)
res = res + db.keys()
if db is None:
db = self.getclassdb(classname)
res = res + db.keys()
+
+ # remove the uncommitted, destroyed nodes
+ if self.destroyednodes.has_key(classname):
+ for nodeid in self.destroyednodes[classname].keys():
+ if db.has_key(nodeid):
+ res.remove(nodeid)
+
return res
return res
if __debug__:
print >>hyperdb.DEBUG, 'addjournal', (self, classname, nodeid,
action, params)
if __debug__:
print >>hyperdb.DEBUG, 'addjournal', (self, classname, nodeid,
action, params)
- self.transactions.append((self._doSaveJournal, (classname, nodeid,
+ self.transactions.append((self.doSaveJournal, (classname, nodeid,
action, params)))
def getjournal(self, classname, nodeid):
''' get the journal for id
action, params)))
def getjournal(self, classname, nodeid):
''' get the journal for id
+
+ Raise IndexError if the node doesn't exist (as per history()'s
+ API)
'''
if __debug__:
print >>hyperdb.DEBUG, 'getjournal', (self, classname, nodeid)
# attempt to open the journal - in some rare cases, the journal may
# not exist
try:
'''
if __debug__:
print >>hyperdb.DEBUG, 'getjournal', (self, classname, nodeid)
# attempt to open the journal - in some rare cases, the journal may
# not exist
try:
- db = self._opendb('journals.%s'%classname, 'r')
+ db = self.opendb('journals.%s'%classname, 'r')
except anydbm.error, error:
except anydbm.error, error:
- if str(error) == "need 'c' or 'n' flag to open new db": return []
- elif error.args[0] != 2: raise
- return []
+ if str(error) == "need 'c' or 'n' flag to open new db":
+ raise IndexError, 'no such %s %s'%(classname, nodeid)
+ elif error.args[0] != 2:
+ raise
+ raise IndexError, 'no such %s %s'%(classname, nodeid)
try:
journal = marshal.loads(db[nodeid])
except KeyError:
db.close()
try:
journal = marshal.loads(db[nodeid])
except KeyError:
db.close()
- raise KeyError, 'no such %s %s'%(classname, nodeid)
+ raise IndexError, 'no such %s %s'%(classname, nodeid)
db.close()
res = []
db.close()
res = []
- for entry in journal:
- (nodeid, date_stamp, user, action, params) = entry
- date_obj = date.Date(date_stamp)
- res.append((nodeid, date_obj, user, action, params))
+ for nodeid, date_stamp, user, action, params in journal:
+ res.append((nodeid, date.Date(date_stamp), user, action, params))
return res
def pack(self, pack_before):
return res
def pack(self, pack_before):
classes = self.getclasses()
classes = self.getclasses()
- # TODO: factor this out to method - we're already doing it in
- # _opendb.
- db_type = ''
- path = os.path.join(os.getcwd(), self.dir, classes[0])
- if os.path.exists(path):
- db_type = whichdb.whichdb(path)
- if not db_type:
- raise hyperdb.DatabaseError, "Couldn't identify database type"
- elif os.path.exists(path+'.db'):
- db_type = 'dbm'
+ # figure the class db type
for classname in classes:
db_name = 'journals.%s'%classname
for classname in classes:
db_name = 'journals.%s'%classname
- db = self._opendb(db_name, 'w')
+ path = os.path.join(os.getcwd(), self.dir, classname)
+ db_type = self.determine_db_type(path)
+ db = self.opendb(db_name, 'w')
for key in db.keys():
journal = marshal.loads(db[key])
for key in db.keys():
journal = marshal.loads(db[key])
self.cache = {}
self.dirtynodes = {}
self.newnodes = {}
self.cache = {}
self.dirtynodes = {}
self.newnodes = {}
+ self.destroyednodes = {}
self.transactions = []
self.transactions = []
- def _doSaveNode(self, classname, nodeid, node):
+ def getCachedClassDB(self, classname):
+ ''' get the class db, looking in our cache of databases for commit
+ '''
+ # get the database handle
+ db_name = 'nodes.%s'%classname
+ if not self.databases.has_key(db_name):
+ self.databases[db_name] = self.getclassdb(classname, 'c')
+ return self.databases[db_name]
+
+ def doSaveNode(self, classname, nodeid, node):
if __debug__:
if __debug__:
- print >>hyperdb.DEBUG, '_doSaveNode', (self, classname, nodeid,
+ print >>hyperdb.DEBUG, 'doSaveNode', (self, classname, nodeid,
node)
node)
- # get the database handle
- db_name = 'nodes.%s'%classname
- if self.databases.has_key(db_name):
- db = self.databases[db_name]
- else:
- db = self.databases[db_name] = self.getclassdb(classname, 'c')
+ db = self.getCachedClassDB(classname)
# now save the marshalled data
db[nodeid] = marshal.dumps(self.serialise(classname, node))
# now save the marshalled data
db[nodeid] = marshal.dumps(self.serialise(classname, node))
# return the classname, nodeid so we reindex this content
return (classname, nodeid)
# return the classname, nodeid so we reindex this content
return (classname, nodeid)
- def _doSaveJournal(self, classname, nodeid, action, params):
+ def getCachedJournalDB(self, classname):
+ ''' get the journal db, looking in our cache of databases for commit
+ '''
+ # get the database handle
+ db_name = 'journals.%s'%classname
+ if not self.databases.has_key(db_name):
+ self.databases[db_name] = self.opendb(db_name, 'c')
+ return self.databases[db_name]
+
+ def doSaveJournal(self, classname, nodeid, action, params):
# serialise first
if action in ('set', 'create'):
params = self.serialise(classname, params)
# serialise first
if action in ('set', 'create'):
params = self.serialise(classname, params)
params)
if __debug__:
params)
if __debug__:
- print >>hyperdb.DEBUG, '_doSaveJournal', entry
+ print >>hyperdb.DEBUG, 'doSaveJournal', entry
- # get the database handle
- db_name = 'journals.%s'%classname
- if self.databases.has_key(db_name):
- db = self.databases[db_name]
- else:
- db = self.databases[db_name] = self._opendb(db_name, 'c')
+ db = self.getCachedJournalDB(classname)
# now insert the journal entry
if db.has_key(nodeid):
# now insert the journal entry
if db.has_key(nodeid):
db[nodeid] = marshal.dumps(l)
db[nodeid] = marshal.dumps(l)
+ def doDestroyNode(self, classname, nodeid):
+ if __debug__:
+ print >>hyperdb.DEBUG, 'doDestroyNode', (self, classname, nodeid)
+
+ # delete from the class database
+ db = self.getCachedClassDB(classname)
+ if db.has_key(nodeid):
+ del db[nodeid]
+
+ # delete from the database
+ db = self.getCachedJournalDB(classname)
+ if db.has_key(nodeid):
+ del db[nodeid]
+
+ # return the classname, nodeid so we reindex this content
+ return (classname, nodeid)
+
def rollback(self):
''' Reverse all actions from the current transaction.
'''
def rollback(self):
''' Reverse all actions from the current transaction.
'''
print >>hyperdb.DEBUG, 'rollback', (self, )
for method, args in self.transactions:
# delete temporary files
print >>hyperdb.DEBUG, 'rollback', (self, )
for method, args in self.transactions:
# delete temporary files
- if method == self._doStoreFile:
- self._rollbackStoreFile(*args)
+ if method == self.doStoreFile:
+ self.rollbackStoreFile(*args)
self.cache = {}
self.dirtynodes = {}
self.newnodes = {}
self.cache = {}
self.dirtynodes = {}
self.newnodes = {}
+ self.destroyednodes = {}
self.transactions = []
_marker = []
self.transactions = []
_marker = []
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
key, value, link_class)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
key, value, link_class)
- elif not self.db.hasnode(link_class, value):
+ elif not self.db.getclass(link_class).hasnode(value):
raise IndexError, '%s has no node %s'%(link_class, value)
# save off the value
raise IndexError, '%s has no node %s'%(link_class, value)
# save off the value
propvalues[key] = value
# handle additions
propvalues[key] = value
# handle additions
- for id in value:
- if not self.db.hasnode(link_class, id):
- raise IndexError, '%s has no node %s'%(link_class, id)
+ for nodeid in value:
+ if not self.db.getclass(link_class).hasnode(nodeid):
+ raise IndexError, '%s has no node %s'%(link_class,
+ nodeid)
# register the link with the newly linked node
if self.do_journal and self.properties[key].do_journal:
# register the link with the newly linked node
if self.do_journal and self.properties[key].do_journal:
- self.db.addjournal(link_class, id, 'link',
+ self.db.addjournal(link_class, nodeid, 'link',
(self.classname, newid, key))
elif isinstance(prop, String):
(self.classname, newid, key))
elif isinstance(prop, String):
if value is not None and not isinstance(value, date.Interval):
raise TypeError, 'new property "%s" not an Interval'%key
if value is not None and not isinstance(value, date.Interval):
raise TypeError, 'new property "%s" not an Interval'%key
+ elif value is not None and isinstance(prop, Number):
+ try:
+ float(value)
+ except ValueError:
+ raise TypeError, 'new property "%s" not numeric'%key
+
+ elif value is not None and isinstance(prop, Boolean):
+ try:
+ int(value)
+ except ValueError:
+ raise TypeError, 'new property "%s" not boolean'%key
+
# make sure there's data where there needs to be
for key, prop in self.properties.items():
if propvalues.has_key(key):
# make sure there's data where there needs to be
for key, prop in self.properties.items():
if propvalues.has_key(key):
if isinstance(prop, Multilink):
propvalues[key] = []
else:
if isinstance(prop, Multilink):
propvalues[key] = []
else:
- # TODO: None isn't right here, I think...
propvalues[key] = None
# done
propvalues[key] = None
# done
if isinstance(prop, Multilink):
return []
else:
if isinstance(prop, Multilink):
return []
else:
- # TODO: None isn't right here, I think...
return None
else:
return default
return None
else:
return default
if node.has_key(self.db.RETIRED_FLAG):
raise IndexError
num_re = re.compile('^\d+$')
if node.has_key(self.db.RETIRED_FLAG):
raise IndexError
num_re = re.compile('^\d+$')
- for key, value in propvalues.items():
+
+ # if the journal value is to be different, store it in here
+ journalvalues = {}
+
+ for propname, value in propvalues.items():
# check to make sure we're not duplicating an existing key
# check to make sure we're not duplicating an existing key
- if key == self.key and node[key] != value:
+ if propname == self.key and node[propname] != value:
try:
self.lookup(value)
except KeyError:
try:
self.lookup(value)
except KeyError:
# this will raise the KeyError if the property isn't valid
# ... we don't use getprops() here because we only care about
# the writeable properties.
# this will raise the KeyError if the property isn't valid
# ... we don't use getprops() here because we only care about
# the writeable properties.
- prop = self.properties[key]
+ prop = self.properties[propname]
# if the value's the same as the existing value, no sense in
# doing anything
# if the value's the same as the existing value, no sense in
# doing anything
- if node.has_key(key) and value == node[key]:
- del propvalues[key]
+ if node.has_key(propname) and value == node[propname]:
+ del propvalues[propname]
continue
# do stuff based on the prop type
if isinstance(prop, Link):
continue
# do stuff based on the prop type
if isinstance(prop, Link):
- link_class = self.properties[key].classname
+ link_class = prop.classname
# if it isn't a number, it's a key
# if it isn't a number, it's a key
- if type(value) != type(''):
- raise ValueError, 'link value must be String'
- if not num_re.match(value):
+ if value is not None and not isinstance(value, type('')):
+ raise ValueError, 'property "%s" link value be a string'%(
+ propname)
+ if isinstance(value, type('')) and not num_re.match(value):
try:
value = self.db.classes[link_class].lookup(value)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
try:
value = self.db.classes[link_class].lookup(value)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
- key, value, self.properties[key].classname)
+ propname, value, prop.classname)
- if not self.db.hasnode(link_class, value):
+ if (value is not None and
+ not self.db.getclass(link_class).hasnode(value)):
raise IndexError, '%s has no node %s'%(link_class, value)
raise IndexError, '%s has no node %s'%(link_class, value)
- if self.do_journal and self.properties[key].do_journal:
+ if self.do_journal and prop.do_journal:
# register the unlink with the old linked node
# register the unlink with the old linked node
- if node[key] is not None:
- self.db.addjournal(link_class, node[key], 'unlink',
- (self.classname, nodeid, key))
+ if node[propname] is not None:
+ self.db.addjournal(link_class, node[propname], 'unlink',
+ (self.classname, nodeid, propname))
# register the link with the newly linked node
if value is not None:
self.db.addjournal(link_class, value, 'link',
# register the link with the newly linked node
if value is not None:
self.db.addjournal(link_class, value, 'link',
- (self.classname, nodeid, key))
+ (self.classname, nodeid, propname))
elif isinstance(prop, Multilink):
if type(value) != type([]):
elif isinstance(prop, Multilink):
if type(value) != type([]):
- raise TypeError, 'new property "%s" not a list of ids'%key
- link_class = self.properties[key].classname
+ raise TypeError, 'new property "%s" not a list of'\
+ ' ids'%propname
+ link_class = self.properties[propname].classname
l = []
for entry in value:
# if it isn't a number, it's a key
if type(entry) != type(''):
raise ValueError, 'new property "%s" link value ' \
l = []
for entry in value:
# if it isn't a number, it's a key
if type(entry) != type(''):
raise ValueError, 'new property "%s" link value ' \
- 'must be a string'%key
+ 'must be a string'%propname
if not num_re.match(entry):
try:
entry = self.db.classes[link_class].lookup(entry)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
if not num_re.match(entry):
try:
entry = self.db.classes[link_class].lookup(entry)
except (TypeError, KeyError):
raise IndexError, 'new property "%s": %s not a %s'%(
- key, entry, self.properties[key].classname)
+ propname, entry,
+ self.properties[propname].classname)
l.append(entry)
value = l
l.append(entry)
value = l
- propvalues[key] = value
+ propvalues[propname] = value
+
+ # figure the journal entry for this property
+ add = []
+ remove = []
# handle removals
# handle removals
- if node.has_key(key):
- l = node[key]
+ if node.has_key(propname):
+ l = node[propname]
else:
l = []
for id in l[:]:
if id in value:
continue
# register the unlink with the old linked node
else:
l = []
for id in l[:]:
if id in value:
continue
# register the unlink with the old linked node
- if self.do_journal and self.properties[key].do_journal:
+ if self.do_journal and self.properties[propname].do_journal:
self.db.addjournal(link_class, id, 'unlink',
self.db.addjournal(link_class, id, 'unlink',
- (self.classname, nodeid, key))
+ (self.classname, nodeid, propname))
l.remove(id)
l.remove(id)
+ remove.append(id)
# handle additions
for id in value:
# handle additions
for id in value:
- if not self.db.hasnode(link_class, id):
- raise IndexError, '%s has no node %s'%(
- link_class, id)
+ if not self.db.getclass(link_class).hasnode(id):
+ raise IndexError, '%s has no node %s'%(link_class, id)
if id in l:
continue
# register the link with the newly linked node
if id in l:
continue
# register the link with the newly linked node
- if self.do_journal and self.properties[key].do_journal:
+ if self.do_journal and self.properties[propname].do_journal:
self.db.addjournal(link_class, id, 'link',
self.db.addjournal(link_class, id, 'link',
- (self.classname, nodeid, key))
+ (self.classname, nodeid, propname))
l.append(id)
l.append(id)
+ add.append(id)
+
+ # figure the journal entry
+ l = []
+ if add:
+ l.append(('add', add))
+ if remove:
+ l.append(('remove', remove))
+ if l:
+ journalvalues[propname] = tuple(l)
elif isinstance(prop, String):
if value is not None and type(value) != type(''):
elif isinstance(prop, String):
if value is not None and type(value) != type(''):
- raise TypeError, 'new property "%s" not a string'%key
+ raise TypeError, 'new property "%s" not a string'%propname
elif isinstance(prop, Password):
if not isinstance(value, password.Password):
elif isinstance(prop, Password):
if not isinstance(value, password.Password):
- raise TypeError, 'new property "%s" not a Password'% key
- propvalues[key] = value
+ raise TypeError, 'new property "%s" not a Password'%propname
+ propvalues[propname] = value
elif value is not None and isinstance(prop, Date):
if not isinstance(value, date.Date):
elif value is not None and isinstance(prop, Date):
if not isinstance(value, date.Date):
- raise TypeError, 'new property "%s" not a Date'% key
- propvalues[key] = value
+ raise TypeError, 'new property "%s" not a Date'% propname
+ propvalues[propname] = value
elif value is not None and isinstance(prop, Interval):
if not isinstance(value, date.Interval):
elif value is not None and isinstance(prop, Interval):
if not isinstance(value, date.Interval):
- raise TypeError, 'new property "%s" not an Interval'% key
- propvalues[key] = value
+ raise TypeError, 'new property "%s" not an '\
+ 'Interval'%propname
+ propvalues[propname] = value
+
+ elif value is not None and isinstance(prop, Number):
+ try:
+ float(value)
+ except ValueError:
+ raise TypeError, 'new property "%s" not numeric'%propname
+
+ elif value is not None and isinstance(prop, Boolean):
+ try:
+ int(value)
+ except ValueError:
+ raise TypeError, 'new property "%s" not boolean'%propname
- node[key] = value
+ node[propname] = value
# nothing to do?
if not propvalues:
# nothing to do?
if not propvalues:
# do the set, and journal it
self.db.setnode(self.classname, nodeid, node)
# do the set, and journal it
self.db.setnode(self.classname, nodeid, node)
+
if self.do_journal:
if self.do_journal:
+ propvalues.update(journalvalues)
self.db.addjournal(self.classname, nodeid, 'set', propvalues)
self.fireReactors('set', nodeid, oldvalues)
self.db.addjournal(self.classname, nodeid, 'set', propvalues)
self.fireReactors('set', nodeid, oldvalues)
self.fireReactors('retire', nodeid, None)
self.fireReactors('retire', nodeid, None)
+ def destroy(self, nodeid):
+ """Destroy a node.
+
+ WARNING: this method should never be used except in extremely rare
+ situations where there could never be links to the node being
+ deleted
+ WARNING: use retire() instead
+ WARNING: the properties of this node will not be available ever again
+ WARNING: really, use retire() instead
+
+ Well, I think that's enough warnings. This method exists mostly to
+ support the session storage of the cgi interface.
+ """
+ if self.db.journaltag is None:
+ raise DatabaseError, 'Database open read-only'
+ self.db.destroynode(self.classname, nodeid)
+
def history(self, nodeid):
"""Retrieve the journal of edits on a particular node.
def history(self, nodeid):
"""Retrieve the journal of edits on a particular node.
'propname' must be the name of a String property of this class or
None, or a TypeError is raised. The values of the key property on
'propname' must be the name of a String property of this class or
None, or a TypeError is raised. The values of the key property on
- all existing nodes must be unique or a ValueError is raised.
+ all existing nodes must be unique or a ValueError is raised. If the
+ property doesn't exist, KeyError is raised.
"""
"""
- # TODO: validate that the property is a String!
+ prop = self.getprops()[propname]
+ if not isinstance(prop, String):
+ raise TypeError, 'key properties must be String'
self.key = propname
def getkey(self):
self.key = propname
def getkey(self):
prop = self.properties[propname]
if not isinstance(prop, Link) and not isinstance(prop, Multilink):
raise TypeError, "'%s' not a Link/Multilink property"%propname
prop = self.properties[propname]
if not isinstance(prop, Link) and not isinstance(prop, Multilink):
raise TypeError, "'%s' not a Link/Multilink property"%propname
- #XXX edit is expensive and of questionable use
- #for nodeid in nodeids:
- # if not self.db.hasnode(prop.classname, nodeid):
- # raise ValueError, '%s has no node %s'%(prop.classname, nodeid)
# ok, now do the find
cldb = self.db.getclassdb(self.classname)
# ok, now do the find
cldb = self.db.getclassdb(self.classname)
if node.has_key(self.db.RETIRED_FLAG):
continue
for key, value in requirements.items():
if node.has_key(self.db.RETIRED_FLAG):
continue
for key, value in requirements.items():
- if node[key] and node[key].lower() != value:
+ if node[key] is None or node[key].lower() != value:
break
else:
l.append(nodeid)
break
else:
l.append(nodeid)
l.sort()
return l
l.sort()
return l
- # XXX not in spec
def filter(self, search_matches, filterspec, sort, group,
num_re = re.compile('^\d+$')):
''' Return a list of the ids of the active nodes in this class that
match the 'filter' spec, sorted by the group spec and then the
def filter(self, search_matches, filterspec, sort, group,
num_re = re.compile('^\d+$')):
''' Return a list of the ids of the active nodes in this class that
match the 'filter' spec, sorted by the group spec and then the
- sort spec
+ sort spec.
+
+ "filterspec" is {propname: value(s)}
+ "sort" is ['+propname', '-propname', 'propname', ...]
+ "group" is ['+propname', '-propname', 'propname', ...]
'''
cn = self.classname
# optimise filterspec
l = []
props = self.getprops()
'''
cn = self.classname
# optimise filterspec
l = []
props = self.getprops()
+ LINK = 0
+ MULTILINK = 1
+ STRING = 2
+ OTHER = 6
for k, v in filterspec.items():
propclass = props[k]
if isinstance(propclass, Link):
for k, v in filterspec.items():
propclass = props[k]
if isinstance(propclass, Link):
k, entry, self.properties[k].classname)
u.append(entry)
k, entry, self.properties[k].classname)
u.append(entry)
- l.append((0, k, u))
+ l.append((LINK, k, u))
elif isinstance(propclass, Multilink):
if type(v) is not type([]):
v = [v]
elif isinstance(propclass, Multilink):
if type(v) is not type([]):
v = [v]
raise ValueError, 'new property "%s": %s not a %s'%(
k, entry, self.properties[k].classname)
u.append(entry)
raise ValueError, 'new property "%s": %s not a %s'%(
k, entry, self.properties[k].classname)
u.append(entry)
- l.append((1, k, u))
+ l.append((MULTILINK, k, u))
elif isinstance(propclass, String):
# simple glob searching
v = re.sub(r'([\|\{\}\\\.\+\[\]\(\)])', r'\\\1', v)
v = v.replace('?', '.')
v = v.replace('*', '.*?')
elif isinstance(propclass, String):
# simple glob searching
v = re.sub(r'([\|\{\}\\\.\+\[\]\(\)])', r'\\\1', v)
v = v.replace('?', '.')
v = v.replace('*', '.*?')
- l.append((2, k, re.compile(v, re.I)))
+ l.append((STRING, k, re.compile(v, re.I)))
+ elif isinstance(propclass, Boolean):
+ if type(v) is type(''):
+ bv = v.lower() in ('yes', 'true', 'on', '1')
+ else:
+ bv = v
+ l.append((OTHER, k, bv))
+ elif isinstance(propclass, Number):
+ l.append((OTHER, k, int(v)))
else:
else:
- l.append((6, k, v))
+ l.append((OTHER, k, v))
filterspec = l
# now, find all the nodes that are active and pass filtering
l = []
cldb = self.db.getclassdb(cn)
try:
filterspec = l
# now, find all the nodes that are active and pass filtering
l = []
cldb = self.db.getclassdb(cn)
try:
+ # TODO: only full-scan once (use items())
for nodeid in self.db.getnodeids(cn, cldb):
node = self.db.getnode(cn, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
# apply filter
for t, k, v in filterspec:
for nodeid in self.db.getnodeids(cn, cldb):
node = self.db.getnode(cn, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
# apply filter
for t, k, v in filterspec:
- # this node doesn't have this property, so reject it
- if not node.has_key(k): break
+ # make sure the node has the property
+ if not node.has_key(k):
+ # this node doesn't have this property, so reject it
+ break
- if t == 0 and node[k] not in v:
- # link - if this node'd property doesn't appear in the
+ # now apply the property filter
+ if t == LINK:
+ # link - if this node's property doesn't appear in the
# filterspec's nodeid list, skip it
# filterspec's nodeid list, skip it
- break
- elif t == 1:
+ if node[k] not in v:
+ break
+ elif t == MULTILINK:
# multilink - if any of the nodeids required by the
# filterspec aren't in this node's property, then skip
# it
# multilink - if any of the nodeids required by the
# filterspec aren't in this node's property, then skip
# it
- for value in v:
- if value not in node[k]:
+ have = node[k]
+ for want in v:
+ if want not in have:
break
else:
continue
break
break
else:
continue
break
- elif t == 2 and (node[k] is None or not v.search(node[k])):
+ elif t == STRING:
# RE search
# RE search
- break
- elif t == 6 and node[k] != v:
+ if node[k] is None or not v.search(node[k]):
+ break
+ elif t == OTHER:
# straight value comparison for the other types
# straight value comparison for the other types
- break
+ if node[k] != v:
+ break
else:
l.append((nodeid, node))
finally:
else:
l.append((nodeid, node))
finally:
# filter based on full text search
if search_matches is not None:
k = []
# filter based on full text search
if search_matches is not None:
k = []
- l_debug = []
for v in l:
for v in l:
- l_debug.append(v[0])
if search_matches.has_key(v[0]):
k.append(v)
l = k
if search_matches.has_key(v[0]):
k.append(v)
l = k
elif dir == '-':
r = cmp(len(bv), len(av))
if r != 0: return r
elif dir == '-':
r = cmp(len(bv), len(av))
if r != 0: return r
+ elif isinstance(propclass, Number) or isinstance(propclass, Boolean):
+ if dir == '+':
+ r = cmp(av, bv)
+ elif dir == '-':
+ r = cmp(bv, av)
+
# end for dir, prop in list:
# end for list in sort, group:
# if all else fails, compare the ids
# end for dir, prop in list:
# end for list in sort, group:
# if all else fails, compare the ids
# find all the String properties that have indexme
for prop, propclass in self.getprops().items():
if isinstance(propclass, String) and propclass.indexme:
# find all the String properties that have indexme
for prop, propclass in self.getprops().items():
if isinstance(propclass, String) and propclass.indexme:
- # and index them under (classname, nodeid, property)
- self.db.indexer.add_text((self.classname, nodeid, prop),
- str(self.get(nodeid, prop)))
+ try:
+ value = str(self.get(nodeid, prop))
+ except IndexError:
+ # node no longer exists - entry should be removed
+ self.db.indexer.purge_entry((self.classname, nodeid, prop))
+ else:
+ # and index them under (classname, nodeid, property)
+ self.db.indexer.add_text((self.classname, nodeid, prop),
+ value)
#
# Detector interface
#
# Detector interface
#
#$Log: not supported by cvs2svn $
#
#$Log: not supported by cvs2svn $
+#Revision 1.56 2002/07/31 22:04:33 richard
+#cleanup
+#
+#Revision 1.55 2002/07/30 08:22:38 richard
+#Session storage in the hyperdb was horribly, horribly inefficient. We use
+#a simple anydbm wrapper now - which could be overridden by the metakit
+#backend or RDB backend if necessary.
+#Much, much better.
+#
+#Revision 1.54 2002/07/26 08:26:59 richard
+#Very close now. The cgi and mailgw now use the new security API. The two
+#templates have been migrated to that setup. Lots of unit tests. Still some
+#issue in the web form for editing Roles assigned to users.
+#
+#Revision 1.53 2002/07/25 07:14:06 richard
+#Bugger it. Here's the current shape of the new security implementation.
+#Still to do:
+# . call the security funcs from cgi and mailgw
+# . change shipped templates to include correct initialisation and remove
+# the old config vars
+#... that seems like a lot. The bulk of the work has been done though. Honest :)
+#
+#Revision 1.52 2002/07/19 03:36:34 richard
+#Implemented the destroy() method needed by the session database (and possibly
+#others). At the same time, I removed the leading underscores from the hyperdb
+#methods that Really Didn't Need Them.
+#The journal also raises IndexError now for all situations where there is a
+#request for the journal of a node that doesn't have one. It used to return
+#[] in _some_ situations, but not all. This _may_ break code, but the tests
+#pass...
+#
+#Revision 1.51 2002/07/18 23:07:08 richard
+#Unit tests and a few fixes.
+#
+#Revision 1.50 2002/07/18 11:50:58 richard
+#added tests for number type too
+#
+#Revision 1.49 2002/07/18 11:41:10 richard
+#added tests for boolean type, and fixes to anydbm backend
+#
+#Revision 1.48 2002/07/18 11:17:31 gmcm
+#Add Number and Boolean types to hyperdb.
+#Add conversion cases to web, mail & admin interfaces.
+#Add storage/serialization cases to back_anydbm & back_metakit.
+#
+#Revision 1.47 2002/07/14 23:18:20 richard
+#. fixed the journal bloat from multilink changes - we just log the add or
+# remove operations, not the whole list
+#
+#Revision 1.46 2002/07/14 06:06:34 richard
+#Did some old TODOs
+#
+#Revision 1.45 2002/07/14 04:03:14 richard
+#Implemented a switch to disable journalling for a Class. CGI session
+#database now uses it.
+#
#Revision 1.44 2002/07/14 02:05:53 richard
#. all storage-specific code (ie. backend) is now implemented by the backends
#
#Revision 1.44 2002/07/14 02:05:53 richard
#. all storage-specific code (ie. backend) is now implemented by the backends
#