X-Git-Url: https://git.tokkee.org/?a=blobdiff_plain;f=roundup%2Fbackends%2Fback_anydbm.py;h=ea2533a31b86ad70185de6f407f2f9e0952664ac;hb=ba2393a55084938b58970bc4c817428ce133f36a;hp=db188141017d45a764711dd949585c16be579880;hpb=51f16da93fca36a7766fc79bccf187ac20fea152;p=roundup.git

diff --git a/roundup/backends/back_anydbm.py b/roundup/backends/back_anydbm.py
index db18814..ea2533a 100644
--- a/roundup/backends/back_anydbm.py
+++ b/roundup/backends/back_anydbm.py
@@ -15,7 +15,7 @@
 # BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
 # SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 #
-#$Id: back_anydbm.py,v 1.74 2002-09-10 08:04:56 richard Exp $
+#$Id: back_anydbm.py,v 1.86 2002-09-26 03:04:24 richard Exp $
 '''
 This module defines a backend that saves the hyperdatabase in a database
 chosen by anydbm. It is guaranteed to always be available in python
@@ -73,11 +73,21 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         os.umask(0002)
 
     def post_init(self):
-        '''Called once the schema initialisation has finished.'''
+        ''' Called once the schema initialisation has finished.
+        '''
         # reindex the db if necessary
         if self.indexer.should_reindex():
             self.reindex()
 
+        # figure the "curuserid"
+        if self.journaltag is None:
+            self.curuserid = None
+        elif self.journaltag == 'admin':
+            # admin user may not exist, but always has ID 1
+            self.curuserid = '1'
+        else:
+            self.curuserid = self.user.lookup(self.journaltag)
+
     def reindex(self):
         for klass in self.classes.values():
             for nodeid in klass.list():
@@ -121,7 +131,10 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         '''
         if __debug__:
             print >>hyperdb.DEBUG, 'getclass', (self, classname)
-        return self.classes[classname]
+        try:
+            return self.classes[classname]
+        except KeyError:
+            raise KeyError, 'There is no class called "%s"'%classname
     #
     # Class DBs
     #
@@ -154,7 +167,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         if os.path.exists(path):
             db_type = whichdb.whichdb(path)
             if not db_type:
-                raise hyperdb.DatabaseError, "Couldn't identify database type"
+                raise DatabaseError, "Couldn't identify database type"
         elif os.path.exists(path+'.db'):
             # if the path ends in '.db', it's a dbm database, whether
             # anydbm says it's dbhash or not!
@@ -182,7 +195,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         try:
             dbm = __import__(db_type)
         except ImportError:
-            raise hyperdb.DatabaseError, \
+            raise DatabaseError, \
                 "Couldn't open database - the required module '%s'"\
                 " is not available"%db_type
         if __debug__:
@@ -233,6 +246,15 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         '''
         if __debug__:
             print >>hyperdb.DEBUG, 'addnode', (self, classname, nodeid, node)
+
+        # we'll be supplied these props if we're doing an import
+        if not node.has_key('creator'):
+            # add in the "calculated" properties (dupe so we don't affect
+            # calling code's node assumptions)
+            node = node.copy()
+            node['creator'] = self.curuserid
+            node['creation'] = node['activity'] = date.Date()
+
         self.newnodes.setdefault(classname, {})[nodeid] = 1
         self.cache.setdefault(classname, {})[nodeid] = node
         self.savenode(classname, nodeid, node)
@@ -244,6 +266,11 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
             print >>hyperdb.DEBUG, 'setnode', (self, classname, nodeid, node)
         self.dirtynodes.setdefault(classname, {})[nodeid] = 1
 
+        # update the activity time (dupe so we don't affect
+        # calling code's node assumptions)
+        node = node.copy()
+        node['activity'] = date.Date()
+
         # can't set without having already loaded the node
         self.cache[classname][nodeid] = node
         self.savenode(classname, nodeid, node)
@@ -447,7 +474,8 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
     #
     # Journal
     #
-    def addjournal(self, classname, nodeid, action, params):
+    def addjournal(self, classname, nodeid, action, params, creator=None,
+            creation=None):
         ''' Journal the Action
             'action' may be:
 
@@ -457,9 +485,9 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         '''
         if __debug__:
             print >>hyperdb.DEBUG, 'addjournal', (self, classname, nodeid,
-                action, params)
+                action, params, creator, creation)
         self.transactions.append((self.doSaveJournal, (classname, nodeid,
-            action, params)))
+            action, params, creator, creation)))
 
     def getjournal(self, classname, nodeid):
         ''' get the journal for id
@@ -496,6 +524,7 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         if __debug__:
             print >>hyperdb.DEBUG, 'packjournal', (self, pack_before)
 
+        pack_before = pack_before.serialise()
         for classname in self.getclasses():
             # get the journal db
             db_name = 'journals.%s'%classname
@@ -512,21 +541,10 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
                     # unpack the entry
                     (nodeid, date_stamp, self.journaltag, action,
                         params) = entry
-                    date_stamp = date.Date(date_stamp)
                     # if the entry is after the pack date, _or_ the initial
                     # create entry, then it stays
                     if date_stamp > pack_before or action == 'create':
                         l.append(entry)
-                    elif action == 'set':
-                        # grab the last set entry to keep information on
-                        # activity
-                        last_set_entry = entry
-                if last_set_entry:
-                    date_stamp = last_set_entry[1]
-                    # if the last set entry was made after the pack date
-                    # then it is already in the list
-                    if date_stamp < pack_before:
-                        l.append(last_set_entry)
                 db[key] = marshal.dumps(l)
             if db_type == 'gdbm':
                 db.reorganize()
@@ -565,6 +583,9 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         # save the indexer state
         self.indexer.save_index()
 
+        self.clearCache()
+
+    def clearCache(self):
         # all transactions committed, back to normal
         self.cache = {}
         self.dirtynodes = {}
@@ -603,28 +624,22 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
             self.databases[db_name] = self.opendb(db_name, 'c')
         return self.databases[db_name]
 
-    def doSaveJournal(self, classname, nodeid, action, params):
-        # handle supply of the special journalling parameters (usually
-        # supplied on importing an existing database)
+    def doSaveJournal(self, classname, nodeid, action, params, creator,
+            creation):
+        # serialise the parameters now if necessary
         if isinstance(params, type({})):
-            if params.has_key('creator'):
-                journaltag = self.user.get(params['creator'], 'username')
-                del params['creator']
-            else:
-                journaltag = self.journaltag
-            if params.has_key('created'):
-                journaldate = params['created'].serialise()
-                del params['created']
-            else:
-                journaldate = date.Date().serialise()
-            if params.has_key('activity'):
-                del params['activity']
-
-            # serialise the parameters now
             if action in ('set', 'create'):
                 params = self.serialise(classname, params)
+
+        # handle supply of the special journalling parameters (usually
+        # supplied on importing an existing database)
+        if creator:
+            journaltag = creator
+        else:
+            journaltag = self.curuserid
+        if creation:
+            journaldate = creation.serialise()
         else:
-            journaltag = self.journaltag
             journaldate = date.Date().serialise()
 
         # create the journal entry
@@ -678,6 +693,11 @@ class Database(FileStorage, hyperdb.Database, roundupdb.Database):
         self.destroyednodes = {}
         self.transactions = []
 
+    def close(self):
+        ''' Nothing to do
+        '''
+        pass
+
 _marker = []
 class Class(hyperdb.Class):
     '''The handle to a particular class of nodes in a hyperdatabase.'''
@@ -884,7 +904,9 @@ class Class(hyperdb.Class):
             proptype = properties[prop]
             value = self.get(nodeid, prop)
             # "marshal" data where needed
-            if isinstance(proptype, hyperdb.Date):
+            if value is None:
+                pass
+            elif isinstance(proptype, hyperdb.Date):
                 value = value.get_tuple()
             elif isinstance(proptype, hyperdb.Interval):
                 value = value.get_tuple()
@@ -919,6 +941,9 @@ class Class(hyperdb.Class):
             if propname == 'id':
                 newid = value
                 continue
+            elif value is None:
+                # don't set Nones
+                continue
             elif isinstance(prop, hyperdb.Date):
                 value = date.Date(value)
             elif isinstance(prop, hyperdb.Interval):
@@ -927,12 +952,26 @@ class Class(hyperdb.Class):
                 pwd = password.Password()
                 pwd.unpack(value)
                 value = pwd
-            if value is not None:
-                d[propname] = value
+            d[propname] = value
 
-        # add
+        # add the node and journal
         self.db.addnode(self.classname, newid, d)
-        self.db.addjournal(self.classname, newid, 'create', d)
+
+        # extract the journalling stuff and nuke it
+        if d.has_key('creator'):
+            creator = d['creator']
+            del d['creator']
+        else:
+            creator = None
+        if d.has_key('creation'):
+            creation = d['creation']
+            del d['creation']
+        else:
+            creation = None
+        if d.has_key('activity'):
+            del d['activity']
+        self.db.addjournal(self.classname, newid, 'create', d, creator,
+            creation)
         return newid
 
     def get(self, nodeid, propname, default=_marker, cache=1):
@@ -953,7 +992,13 @@ class Class(hyperdb.Class):
         if propname == 'id':
             return nodeid
 
+        # get the node's dict
+        d = self.db.getnode(self.classname, nodeid, cache=cache)
+
+        # check for one of the special props
         if propname == 'creation':
+            if d.has_key('creation'):
+                return d['creation']
             if not self.do_journal:
                 raise ValueError, 'Journalling is disabled for this class'
             journal = self.db.getjournal(self.classname, nodeid)
@@ -963,6 +1008,8 @@ class Class(hyperdb.Class):
                 # on the strange chance that there's no journal
                 return date.Date()
         if propname == 'activity':
+            if d.has_key('activity'):
+                return d['activity']
             if not self.do_journal:
                 raise ValueError, 'Journalling is disabled for this class'
             journal = self.db.getjournal(self.classname, nodeid)
@@ -972,20 +1019,29 @@ class Class(hyperdb.Class):
                 # on the strange chance that there's no journal
                 return date.Date()
         if propname == 'creator':
+            if d.has_key('creator'):
+                return d['creator']
             if not self.do_journal:
                 raise ValueError, 'Journalling is disabled for this class'
             journal = self.db.getjournal(self.classname, nodeid)
             if journal:
-                return self.db.getjournal(self.classname, nodeid)[0][2]
+                num_re = re.compile('^\d+$')
+                value = self.db.getjournal(self.classname, nodeid)[0][2]
+                if num_re.match(value):
+                    return value
+                else:
+                    # old-style "username" journal tag
+                    try:
+                        return self.db.user.lookup(value)
+                    except KeyError:
+                        # user's been retired, return admin
+                        return '1'
             else:
-                return self.db.journaltag
+                return self.db.curuserid
 
         # get the property (raises KeyErorr if invalid)
         prop = self.properties[propname]
 
-        # get the node's dict
-        d = self.db.getnode(self.classname, nodeid, cache=cache)
-
         if not d.has_key(propname):
             if default is _marker:
                 if isinstance(prop, Multilink):
@@ -1001,7 +1057,7 @@ class Class(hyperdb.Class):
 
         return d[propname]
 
-    # XXX not in spec
+    # not in spec
     def getnode(self, nodeid, cache=1):
         ''' Return a convenience wrapper for the node.
 
@@ -1081,7 +1137,11 @@ class Class(hyperdb.Class):
             # this will raise the KeyError if the property isn't valid
            # ... we don't use getprops() here because we only care about
            # the writeable properties.
-            prop = self.properties[propname]
+            try:
+                prop = self.properties[propname]
+            except KeyError:
+                raise KeyError, '"%s" has no property named "%s"'%(
+                    self.classname, propname)
 
             # if the value's the same as the existing value, no sense in
             # doing anything
@@ -1354,7 +1414,7 @@ class Class(hyperdb.Class):
         otherwise a KeyError is raised.
         '''
         if not self.key:
-            raise TypeError, 'No key property set'
+            raise TypeError, 'No key property set for class %s'%self.classname
         cldb = self.db.getclassdb(self.classname)
         try:
             for nodeid in self.db.getnodeids(self.classname, cldb):
@@ -1366,20 +1426,24 @@ class Class(hyperdb.Class):
                     return nodeid
         finally:
             cldb.close()
-        raise KeyError, keyvalue
+        raise KeyError, 'No key (%s) value "%s" for "%s"'%(self.key,
+            keyvalue, self.classname)
 
-    # XXX: change from spec - allows multiple props to match
+    # change from spec - allows multiple props to match
     def find(self, **propspec):
         '''Get the ids of nodes in this class which link to the given nodes.
 
-        'propspec' consists of keyword args propname={nodeid:1,}
-        'propname' must be the name of a property in this class, or a
-        KeyError is raised. That property must be a Link or Multilink
-        property, or a TypeError is raised.
+        'propspec' consists of keyword args propname=nodeid or
+                   propname={nodeid:1, }
+        'propname' must be the name of a property in this class, or a
+        KeyError is raised. That property must be a Link or
+        Multilink property, or a TypeError is raised.
 
         Any node in this class whose 'propname' property links to any of
         the nodeids will be returned. Used by the full text indexing, which knows
-        that "foo" occurs in msg1, msg3 and file7, so we have hits on these issues:
+        that "foo" occurs in msg1, msg3 and file7, so we have hits on these
+        issues:
+            db.issue.find(messages={'1':1,'3':1}, files={'7':1})
         '''
         propspec = propspec.items()
 
@@ -1477,6 +1541,10 @@ class Class(hyperdb.Class):
             "sort" and "group" are (dir, prop) where dir is '+', '-' or None
               and prop is a prop name or None
             "search_matches" is {nodeid: marker}
+
+            The filter must match all properties specificed - but if the
+            property value to match is a list, any one of the values in the
+            list may match for that property to match.
         '''
         cn = self.classname
 
@@ -1729,9 +1797,7 @@ class Class(hyperdb.Class):
             d['id'] = String()
             d['creation'] = hyperdb.Date()
             d['activity'] = hyperdb.Date()
-            # can't be a link to user because the user might have been
-            # retired since the journal entry was created
-            d['creator'] = hyperdb.String()
+            d['creator'] = hyperdb.Link('user')
         return d
 
     def addprop(self, **properties):
@@ -1820,7 +1886,7 @@ class FileClass(Class):
 
         # extract the "content" property from the proplist
         i = propnames.index('content')
-        content = proplist[i]
+        content = eval(proplist[i])
         del propnames[i]
         del proplist[i]
 
@@ -1855,8 +1921,7 @@ class FileClass(Class):
            modified.
         '''
         d = Class.getprops(self, protected=protected).copy()
-        if protected:
-            d['content'] = hyperdb.String()
+        d['content'] = hyperdb.String()
         return d
 
     def index(self, nodeid):
@@ -1881,7 +1946,7 @@ class FileClass(Class):
         self.db.indexer.add_text((self.classname, nodeid, 'content'),
                         content, mime_type)
 
-# XXX deviation from spec - was called ItemClass
+# deviation from spec - was called ItemClass
 class IssueClass(Class, roundupdb.IssueClass):
     # Overridden methods:
    def __init__(self, db, classname, **properties):
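
The post_init() hunk above caches the id of the user the database is acting as. The following is a minimal sketch of that resolution rule, written as a standalone function for illustration only; resolve_curuserid and user_class are hypothetical names, while journaltag, the 'admin' special case and lookup() come from the patch:

    # Sketch (not part of the patch): how the new "curuserid" is derived.
    def resolve_curuserid(journaltag, user_class):
        if journaltag is None:
            # anonymous access - nothing to record as the creator
            return None
        elif journaltag == 'admin':
            # the admin user may not exist yet, but always has id '1'
            return '1'
        else:
            # any other user must already exist in the user class
            return user_class.lookup(journaltag)

Every node created through addnode() then gets this id stamped into its 'creator' property, instead of the value being reconstructed from the journal at read time.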
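
Class.create() and the import path now hand the journalling metadata to addjournal() explicitly instead of leaving it inside the property dictionary. A rough sketch of that extraction step, assuming a plain dict and using dict.pop() for brevity (the patch itself uses has_key()/del, and split_journal_metadata is an illustrative name):

    # Illustrative helper, not in the patch: split journal metadata out of
    # the property dict before the node is journalled.
    def split_journal_metadata(d):
        creator = d.pop('creator', None)    # passed to addjournal() as-is
        creation = d.pop('creation', None)  # serialised by doSaveJournal()
        d.pop('activity', None)             # always recomputed, never journalled
        return d, creator, creation

    props, creator, creation = split_journal_metadata(
        {'title': 'first issue', 'creator': '2'})
    # the patch then makes a call of the form:
    # self.db.addjournal(self.classname, newid, 'create', props, creator, creation)

When creator or creation is None, doSaveJournal() falls back to the database's curuserid and the current date.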
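
The 'creator' branch of get() now has to accept two journal formats: entries written by this version store a numeric user id, while older journals stored the username. A hedged sketch of that fallback logic (journal_creator and users are illustrative stand-ins; the numeric-id test, lookup() and the retired-user default of '1' mirror the hunk above):

    import re

    num_re = re.compile('^\d+$')

    def journal_creator(tag, users):
        if num_re.match(tag):
            # new-style journal tag: already a user id
            return tag
        try:
            # old-style "username" journal tag
            return users.lookup(tag)
        except KeyError:
            # user's been retired - fall back to the admin id
            return '1'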