index 91f8a76a4221d7ee7be304f19af475493bdf89e1..d2897addf497c0f019d6cd5bbae4bf8edf6d6eff 100644 (file)
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-#$Id: back_anydbm.py,v 1.79 2002-09-15 23:06:20 richard Exp $
+#$Id: back_anydbm.py,v 1.131 2003-11-14 00:11:18 richard Exp $
'''
This module defines a backend that saves the hyperdatabase in a database
chosen by anydbm. It is guaranteed to always be available in python
serious bugs, and is not available)
'''
-import whichdb, anydbm, os, marshal, re, weakref, string, copy
+try:
+ import anydbm, sys
+ # dumbdbm only works in python 2.1.2+
+ if sys.version_info < (2,1,2):
+ import dumbdbm
+ assert anydbm._defaultmod != dumbdbm
+ del dumbdbm
+except AssertionError:
+ print "WARNING: you should upgrade to python 2.1.3"
+
+import whichdb, os, marshal, re, weakref, string, copy
from roundup import hyperdb, date, password, roundupdb, security
from blobfiles import FileStorage
-from sessions import Sessions
+from sessions import Sessions, OneTimeKeys
from roundup.indexer import Indexer
-from locking import acquire_lock, release_lock
+from roundup.backends import locking
from roundup.hyperdb import String, Password, Date, Interval, Link, \
- Multilink, DatabaseError, Boolean, Number
+ Multilink, DatabaseError, Boolean, Number, Node
+from roundup.date import Range
#
# Now the database
The 'journaltag' is a token that will be attached to the journal
entries for any edits done on the database. If 'journaltag' is
None, the database is opened in read-only mode: the Class.create(),
- Class.set(), and Class.retire() methods are disabled.
- '''
+ Class.set(), Class.retire(), and Class.restore() methods are
+ disabled.
+ '''
self.config, self.journaltag = config, journaltag
self.dir = config.DATABASE
self.classes = {}
self.transactions = []
self.indexer = Indexer(self.dir)
self.sessions = Sessions(self.config)
+ self.otks = OneTimeKeys(self.config)
self.security = security.Security(self)
# ensure files are group readable and writable
os.umask(0002)
+ # lock it
+ lockfilenm = os.path.join(self.dir, 'lock')
+ self.lockfile = locking.acquire_lock(lockfilenm)
+ self.lockfile.write(str(os.getpid()))
+ self.lockfile.flush()
+
def post_init(self):
- '''Called once the schema initialisation has finished.'''
+ ''' Called once the schema initialisation has finished.
+ '''
# reindex the db if necessary
if self.indexer.should_reindex():
self.reindex()
+ def refresh_database(self):
+ "Rebuild the database"
+ self.reindex()
+
def reindex(self):
for klass in self.classes.values():
for nodeid in klass.list():
mode)
return dbm.open(path, mode)
- def lockdb(self, name):
- ''' Lock a database file
- '''
- path = os.path.join(os.getcwd(), self.dir, '%s.lock'%name)
- return acquire_lock(path)
-
#
# Node IDs
#
''' Generate a new id for the given class
'''
# open the ids DB - create if if doesn't exist
- lock = self.lockdb('_ids')
db = self.opendb('_ids', 'c')
if db.has_key(classname):
newid = db[classname] = str(int(db[classname]) + 1)
newid = str(self.getclass(classname).count()+1)
db[classname] = newid
db.close()
- release_lock(lock)
return newid
def setid(self, classname, setid):
''' Set the id counter: used during import of database
'''
# open the ids DB - create if if doesn't exist
- lock = self.lockdb('_ids')
db = self.opendb('_ids', 'c')
db[classname] = str(setid)
db.close()
- release_lock(lock)
#
# Nodes
'''
if __debug__:
print >>hyperdb.DEBUG, 'addnode', (self, classname, nodeid, node)
+
+ # we'll be supplied these props if we're doing an import
+ if not node.has_key('creator'):
+ # add in the "calculated" properties (dupe so we don't affect
+ # calling code's node assumptions)
+ node = node.copy()
+ node['creator'] = self.getuid()
+ node['creation'] = node['activity'] = date.Date()
+
self.newnodes.setdefault(classname, {})[nodeid] = 1
self.cache.setdefault(classname, {})[nodeid] = node
self.savenode(classname, nodeid, node)
print >>hyperdb.DEBUG, 'setnode', (self, classname, nodeid, node)
self.dirtynodes.setdefault(classname, {})[nodeid] = 1
+ # update the activity time (dupe so we don't affect
+ # calling code's node assumptions)
+ node = node.copy()
+ node['activity'] = date.Date()
+
# can't set without having already loaded the node
self.cache[classname][nodeid] = node
self.savenode(classname, nodeid, node)
def getnode(self, classname, nodeid, db=None, cache=1):
''' get a node from the database
+
+ Note the "cache" parameter is not used, and exists purely for
+ backward compatibility!
'''
if __debug__:
print >>hyperdb.DEBUG, 'getnode', (self, classname, nodeid, db)
- if cache:
- # try the cache
- cache_dict = self.cache.setdefault(classname, {})
- if cache_dict.has_key(nodeid):
- if __debug__:
- print >>hyperdb.TRACE, 'get %s %s cached'%(classname,
- nodeid)
- return cache_dict[nodeid]
+
+ # try the cache
+ cache_dict = self.cache.setdefault(classname, {})
+ if cache_dict.has_key(nodeid):
+ if __debug__:
+ print >>hyperdb.TRACE, 'get %s %s cached'%(classname,
+ nodeid)
+ return cache_dict[nodeid]
if __debug__:
print >>hyperdb.TRACE, 'get %s %s'%(classname, nodeid)
if db is None:
db = self.getclassdb(classname)
if not db.has_key(nodeid):
+ # try the cache - might be a brand-new node
+ cache_dict = self.cache.setdefault(classname, {})
+ if cache_dict.has_key(nodeid):
+ if __debug__:
+ print >>hyperdb.TRACE, 'get %s %s cached'%(classname,
+ nodeid)
+ return cache_dict[nodeid]
raise IndexError, "no such %s %s"%(classname, nodeid)
# check the uncommitted, destroyed nodes
# get the property spec
prop = properties[k]
- if isinstance(prop, Password):
+ if isinstance(prop, Password) and v is not None:
d[k] = str(v)
elif isinstance(prop, Date) and v is not None:
d[k] = v.serialise()
d[k] = date.Date(v)
elif isinstance(prop, Interval) and v is not None:
d[k] = date.Interval(v)
- elif isinstance(prop, Password):
+ elif isinstance(prop, Password) and v is not None:
p = password.Password()
p.unpack(v)
d[k] = p
count = count + len(db.keys())
return count
- def getnodeids(self, classname, db=None):
- if __debug__:
- print >>hyperdb.DEBUG, 'getnodeids', (self, classname, db)
-
- res = []
-
- # start off with the new nodes
- if self.newnodes.has_key(classname):
- res += self.newnodes[classname].keys()
-
- if db is None:
- db = self.getclassdb(classname)
- res = res + db.keys()
-
- # remove the uncommitted, destroyed nodes
- if self.destroyednodes.has_key(classname):
- for nodeid in self.destroyednodes[classname].keys():
- if db.has_key(nodeid):
- res.remove(nodeid)
-
- return res
-
#
# Files - special node properties
'''
if __debug__:
print >>hyperdb.DEBUG, 'getjournal', (self, classname, nodeid)
+
+ # our journal result
+ res = []
+
+ # add any journal entries for transactions not committed to the
+ # database
+ for method, args in self.transactions:
+ if method != self.doSaveJournal:
+ continue
+ (cache_classname, cache_nodeid, cache_action, cache_params,
+ cache_creator, cache_creation) = args
+ if cache_classname == classname and cache_nodeid == nodeid:
+ if not cache_creator:
+ cache_creator = self.getuid()
+ if not cache_creation:
+ cache_creation = date.Date()
+ res.append((cache_nodeid, cache_creation, cache_creator,
+ cache_action, cache_params))
+
# attempt to open the journal - in some rare cases, the journal may
# not exist
try:
if str(error) == "need 'c' or 'n' flag to open new db":
raise IndexError, 'no such %s %s'%(classname, nodeid)
elif error.args[0] != 2:
+ # this isn't a "not found" error, be alarmed!
raise
+ if res:
+ # we have unsaved journal entries, return them
+ return res
raise IndexError, 'no such %s %s'%(classname, nodeid)
try:
journal = marshal.loads(db[nodeid])
except KeyError:
db.close()
+ if res:
+ # we have some unsaved journal entries, be happy!
+ return res
raise IndexError, 'no such %s %s'%(classname, nodeid)
db.close()
- res = []
+
+ # add all the saved journal entries for this node
for nodeid, date_stamp, user, action, params in journal:
res.append((nodeid, date.Date(date_stamp), user, action, params))
return res
if __debug__:
print >>hyperdb.DEBUG, 'packjournal', (self, pack_before)
+ pack_before = pack_before.serialise()
for classname in self.getclasses():
# get the journal db
db_name = 'journals.%s'%classname
# unpack the entry
(nodeid, date_stamp, self.journaltag, action,
params) = entry
- date_stamp = date.Date(date_stamp)
# if the entry is after the pack date, _or_ the initial
# create entry, then it stays
if date_stamp > pack_before or action == 'create':
l.append(entry)
- elif action == 'set':
- # grab the last set entry to keep information on
- # activity
- last_set_entry = entry
- if last_set_entry:
- date_stamp = last_set_entry[1]
- # if the last set entry was made after the pack date
- # then it is already in the list
- if date_stamp < pack_before:
- l.append(last_set_entry)
db[key] = marshal.dumps(l)
if db_type == 'gdbm':
db.reorganize()
'''
if __debug__:
print >>hyperdb.DEBUG, 'commit', (self,)
- # TODO: lock the DB
# keep a handle to all the database files opened
self.databases = {}
- # now, do all the transactions
- reindex = {}
- for method, args in self.transactions:
- reindex[method(*args)] = 1
-
- # now close all the database files
- for db in self.databases.values():
- db.close()
- del self.databases
- # TODO: unlock the DB
+ try:
+ # now, do all the transactions
+ reindex = {}
+ for method, args in self.transactions:
+ reindex[method(*args)] = 1
+ finally:
+ # make sure we close all the database files
+ for db in self.databases.values():
+ db.close()
+ del self.databases
# reindex the nodes that request it
for classname, nodeid in filter(None, reindex.keys()):
# save the indexer state
self.indexer.save_index()
+ self.clearCache()
+
+ def clearCache(self):
# all transactions committed, back to normal
self.cache = {}
self.dirtynodes = {}
if creator:
journaltag = creator
else:
- journaltag = self.journaltag
+ journaltag = self.getuid()
if creation:
journaldate = creation.serialise()
else:
def close(self):
''' Nothing to do
'''
- pass
+ if self.lockfile is not None:
+ locking.release_lock(self.lockfile)
+ if self.lockfile is not None:
+ self.lockfile.close()
+ self.lockfile = None
_marker = []
class Class(hyperdb.Class):
# do the db-related init stuff
db.addclass(self)
- self.auditors = {'create': [], 'set': [], 'retire': []}
- self.reactors = {'create': [], 'set': [], 'retire': []}
+ self.auditors = {'create': [], 'set': [], 'retire': [], 'restore': []}
+ self.reactors = {'create': [], 'set': [], 'retire': [], 'restore': []}
def enableJournalling(self):
'''Turn journalling on for this class
These operations trigger detectors and can be vetoed. Attempts
to modify the "creation" or "activity" properties cause a KeyError.
'''
+ self.fireAuditors('create', None, propvalues)
+ newid = self.create_inner(**propvalues)
+ self.fireReactors('create', newid, None)
+ return newid
+
+ def create_inner(self, **propvalues):
+ ''' Called by create, in-between the audit and react calls.
+ '''
if propvalues.has_key('id'):
raise KeyError, '"id" is reserved'
if propvalues.has_key('creation') or propvalues.has_key('activity'):
raise KeyError, '"creation" and "activity" are reserved'
-
- self.fireAuditors('create', None, propvalues)
-
# new node's id
newid = self.db.newid(self.classname)
(self.classname, newid, key))
elif isinstance(prop, String):
- if type(value) != type(''):
+ if type(value) != type('') and type(value) != type(u''):
raise TypeError, 'new property "%s" not a string'%key
elif isinstance(prop, Password):
# done
self.db.addnode(self.classname, newid, propvalues)
if self.do_journal:
- self.db.addjournal(self.classname, newid, 'create', propvalues)
-
- self.fireReactors('create', newid, None)
+ self.db.addjournal(self.classname, newid, 'create', {})
return newid
elif isinstance(proptype, hyperdb.Password):
value = str(value)
l.append(repr(value))
+
+ # append retired flag
+ l.append(repr(self.is_retired(nodeid)))
+
return l
def import_list(self, propnames, proplist):
# make the new node's property map
d = {}
+ newid = None
for i in range(len(propnames)):
- # Use eval to reverse the repr() used to output the CSV
- value = eval(proplist[i])
-
# Figure the property for this column
propname = propnames[i]
- prop = properties[propname]
+
+ # Use eval to reverse the repr() used to output the CSV
+ value = eval(proplist[i])
# "unmarshal" where necessary
if propname == 'id':
newid = value
continue
+ elif propname == 'is retired':
+ # is the item retired?
+ if int(value):
+ d[self.db.RETIRED_FLAG] = 1
+ continue
elif value is None:
- # don't set Nones
+ d[propname] = None
continue
- elif isinstance(prop, hyperdb.Date):
+
+ prop = properties[propname]
+ if isinstance(prop, hyperdb.Date):
value = date.Date(value)
elif isinstance(prop, hyperdb.Interval):
value = date.Interval(value)
value = pwd
d[propname] = value
- # extract the extraneous journalling gumpf and nuke it
+ # get a new id if necessary
+ if newid is None:
+ newid = self.db.newid(self.classname)
+
+ # add the node and journal
+ self.db.addnode(self.classname, newid, d)
+
+ # extract the journalling stuff and nuke it
if d.has_key('creator'):
creator = d['creator']
del d['creator']
creation = None
if d.has_key('activity'):
del d['activity']
-
- # add the node and journal
- self.db.addnode(self.classname, newid, d)
- self.db.addjournal(self.classname, newid, 'create', d, creator,
+ self.db.addjournal(self.classname, newid, 'create', {}, creator,
creation)
return newid
IndexError is raised. 'propname' must be the name of a property
of this class or a KeyError is raised.
- 'cache' indicates whether the transaction cache should be queried
- for the node. If the node has been modified and you need to
- determine what its values prior to modification are, you need to
- set cache=0.
+ 'cache' exists for backward compatibility, and is not used.
Attempts to get the "creation" or "activity" properties should
do the right thing.
if propname == 'id':
return nodeid
+ # get the node's dict
+ d = self.db.getnode(self.classname, nodeid)
+
+ # check for one of the special props
if propname == 'creation':
+ if d.has_key('creation'):
+ return d['creation']
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
journal = self.db.getjournal(self.classname, nodeid)
# on the strange chance that there's no journal
return date.Date()
if propname == 'activity':
+ if d.has_key('activity'):
+ return d['activity']
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
journal = self.db.getjournal(self.classname, nodeid)
# on the strange chance that there's no journal
return date.Date()
if propname == 'creator':
+ if d.has_key('creator'):
+ return d['creator']
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
journal = self.db.getjournal(self.classname, nodeid)
if journal:
- return self.db.getjournal(self.classname, nodeid)[0][2]
+ num_re = re.compile('^\d+$')
+ value = self.db.getjournal(self.classname, nodeid)[0][2]
+ if num_re.match(value):
+ return value
+ else:
+ # old-style "username" journal tag
+ try:
+ return self.db.user.lookup(value)
+ except KeyError:
+ # user's been retired, return admin
+ return '1'
else:
- return self.db.journaltag
+ return self.db.getuid()
# get the property (raises KeyErorr if invalid)
prop = self.properties[propname]
- # get the node's dict
- d = self.db.getnode(self.classname, nodeid, cache=cache)
-
if not d.has_key(propname):
if default is _marker:
if isinstance(prop, Multilink):
'nodeid' must be the id of an existing node of this class or an
IndexError is raised.
- 'cache' indicates whether the transaction cache should be queried
- for the node. If the node has been modified and you need to
- determine what its values prior to modification are, you need to
- set cache=0.
+ 'cache' exists for backwards compatibility, and is not used.
'''
- return Node(self, nodeid, cache=cache)
+ return Node(self, nodeid)
def set(self, nodeid, **propvalues):
'''Modify a property on an existing node of this class.
self.fireAuditors('set', nodeid, propvalues)
# Take a copy of the node dict so that the subsequent set
# operation doesn't modify the oldvalues structure.
- try:
- # try not using the cache initially
- oldvalues = copy.deepcopy(self.db.getnode(self.classname, nodeid,
- cache=0))
- except IndexError:
- # this will be needed if somone does a create() and set()
- # with no intervening commit()
- oldvalues = copy.deepcopy(self.db.getnode(self.classname, nodeid))
+ oldvalues = copy.deepcopy(self.db.getnode(self.classname, nodeid))
node = self.db.getnode(self.classname, nodeid)
if node.has_key(self.db.RETIRED_FLAG):
# this will raise the KeyError if the property isn't valid
# ... we don't use getprops() here because we only care about
# the writeable properties.
- prop = self.properties[propname]
+ try:
+ prop = self.properties[propname]
+ except KeyError:
+ raise KeyError, '"%s" has no property named "%s"'%(
+ self.classname, propname)
# if the value's the same as the existing value, no sense in
# doing anything
- if node.has_key(propname) and value == node[propname]:
+ current = node.get(propname, None)
+ if value == current:
del propvalues[propname]
continue
+ journalvalues[propname] = current
# do stuff based on the prop type
if isinstance(prop, Link):
if self.do_journal and prop.do_journal:
# register the unlink with the old linked node
- if node[propname] is not None:
+ if node.has_key(propname) and node[propname] is not None:
self.db.addjournal(link_class, node[propname], 'unlink',
(self.classname, nodeid, propname))
journalvalues[propname] = tuple(l)
elif isinstance(prop, String):
- if value is not None and type(value) != type(''):
+ if value is not None and type(value) != type('') and type(value) != type(u''):
raise TypeError, 'new property "%s" not a string'%propname
elif isinstance(prop, Password):
self.db.setnode(self.classname, nodeid, node)
if self.do_journal:
- propvalues.update(journalvalues)
- self.db.addjournal(self.classname, nodeid, 'set', propvalues)
+ self.db.addjournal(self.classname, nodeid, 'set', journalvalues)
self.fireReactors('set', nodeid, oldvalues)
self.fireReactors('retire', nodeid, None)
- def is_retired(self, nodeid):
+ def restore(self, nodeid):
+        '''Restore a retired node.
+
+ Make node available for all operations like it was before retirement.
+ '''
+ if self.db.journaltag is None:
+ raise DatabaseError, 'Database open read-only'
+
+ node = self.db.getnode(self.classname, nodeid)
+        # check if key property was overridden
+ key = self.getkey()
+ try:
+ id = self.lookup(node[key])
+ except KeyError:
+ pass
+ else:
+ raise KeyError, "Key property (%s) of retired node clashes with \
+ existing one (%s)" % (key, node[key])
+ # Now we can safely restore node
+ self.fireAuditors('restore', nodeid, None)
+ del node[self.db.RETIRED_FLAG]
+ self.db.setnode(self.classname, nodeid, node)
+ if self.do_journal:
+ self.db.addjournal(self.classname, nodeid, 'restored', None)
+
+ self.fireReactors('restore', nodeid, None)
+
+ def is_retired(self, nodeid, cldb=None):
'''Return true if the node is retired.
'''
- node = self.db.getnode(cn, nodeid, cldb)
+ node = self.db.getnode(self.classname, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
return 1
return 0
def destroy(self, nodeid):
'''Destroy a node.
-
+
WARNING: this method should never be used except in extremely rare
situations where there could never be links to the node being
deleted
The returned list contains tuples of the form
- (date, tag, action, params)
+ (nodeid, date, tag, action, params)
'date' is a Timestamp object specifying the time of the change and
'tag' is the journaltag specified when the database was opened.
raise TypeError, 'No key property set for class %s'%self.classname
cldb = self.db.getclassdb(self.classname)
try:
- for nodeid in self.db.getnodeids(self.classname, cldb):
+ for nodeid in self.getnodeids(cldb):
node = self.db.getnode(self.classname, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
if node[self.key] == keyvalue:
- cldb.close()
return nodeid
finally:
cldb.close()
- raise KeyError, keyvalue
+ raise KeyError, 'No key (%s) value "%s" for "%s"'%(self.key,
+ keyvalue, self.classname)
# change from spec - allows multiple props to match
def find(self, **propspec):
- '''Get the ids of nodes in this class which link to the given nodes.
+ '''Get the ids of items in this class which link to the given items.
+
+ 'propspec' consists of keyword args propname=itemid or
+ propname={itemid:1, }
+ 'propname' must be the name of a property in this class, or a
+ KeyError is raised. That property must be a Link or
+ Multilink property, or a TypeError is raised.
- 'propspec' consists of keyword args propname={nodeid:1,}
- 'propname' must be the name of a property in this class, or a
- KeyError is raised. That property must be a Link or Multilink
- property, or a TypeError is raised.
+ Any item in this class whose 'propname' property links to any of the
+ itemids will be returned. Used by the full text indexing, which knows
+ that "foo" occurs in msg1, msg3 and file7, so we have hits on these
+ issues:
- Any node in this class whose 'propname' property links to any of the
- nodeids will be returned. Used by the full text indexing, which knows
- that "foo" occurs in msg1, msg3 and file7, so we have hits on these issues:
db.issue.find(messages={'1':1,'3':1}, files={'7':1})
'''
propspec = propspec.items()
- for propname, nodeids in propspec:
+ for propname, itemids in propspec:
# check the prop is OK
prop = self.properties[propname]
if not isinstance(prop, Link) and not isinstance(prop, Multilink):
cldb = self.db.getclassdb(self.classname)
l = []
try:
- for id in self.db.getnodeids(self.classname, db=cldb):
- node = self.db.getnode(self.classname, id, db=cldb)
- if node.has_key(self.db.RETIRED_FLAG):
+ for id in self.getnodeids(db=cldb):
+ item = self.db.getnode(self.classname, id, db=cldb)
+ if item.has_key(self.db.RETIRED_FLAG):
continue
- for propname, nodeids in propspec:
- # can't test if the node doesn't have this property
- if not node.has_key(propname):
+ for propname, itemids in propspec:
+ # can't test if the item doesn't have this property
+ if not item.has_key(propname):
continue
- if type(nodeids) is type(''):
- nodeids = {nodeids:1}
+ if type(itemids) is not type({}):
+ itemids = {itemids:1}
+
+ # grab the property definition and its value on this item
prop = self.properties[propname]
- value = node[propname]
- if isinstance(prop, Link) and nodeids.has_key(value):
+ value = item[propname]
+ if isinstance(prop, Link) and itemids.has_key(value):
l.append(id)
break
elif isinstance(prop, Multilink):
hit = 0
for v in value:
- if nodeids.has_key(v):
+ if itemids.has_key(v):
l.append(id)
hit = 1
break
l = []
cldb = self.db.getclassdb(self.classname)
try:
- for nodeid in self.db.getnodeids(self.classname, cldb):
+ for nodeid in self.getnodeids(cldb):
node = self.db.getnode(self.classname, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
for key, value in requirements.items():
+ if not node.has_key(key):
+ break
if node[key] is None or node[key].lower() != value:
break
else:
cn = self.classname
cldb = self.db.getclassdb(cn)
try:
- for nodeid in self.db.getnodeids(cn, cldb):
+ for nodeid in self.getnodeids(cldb):
node = self.db.getnode(cn, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
l.sort()
return l
- def filter(self, search_matches, filterspec, sort, group,
- num_re = re.compile('^\d+$')):
+ def getnodeids(self, db=None):
+ ''' Return a list of ALL nodeids
+ '''
+ if __debug__:
+ print >>hyperdb.DEBUG, 'getnodeids', (self, self.classname, db)
+
+ res = []
+
+ # start off with the new nodes
+ if self.db.newnodes.has_key(self.classname):
+ res += self.db.newnodes[self.classname].keys()
+
+ if db is None:
+ db = self.db.getclassdb(self.classname)
+ res = res + db.keys()
+
+ # remove the uncommitted, destroyed nodes
+ if self.db.destroyednodes.has_key(self.classname):
+ for nodeid in self.db.destroyednodes[self.classname].keys():
+ if db.has_key(nodeid):
+ res.remove(nodeid)
+
+ return res
+
+ def filter(self, search_matches, filterspec, sort=(None,None),
+ group=(None,None), num_re = re.compile('^\d+$')):
''' Return a list of the ids of the active nodes in this class that
match the 'filter' spec, sorted by the group spec and then the
sort spec.
"sort" and "group" are (dir, prop) where dir is '+', '-' or None
and prop is a prop name or None
"search_matches" is {nodeid: marker}
+
+        The filter must match all properties specified - but if the
+ property value to match is a list, any one of the values in the
+ list may match for that property to match. Unless the property
+ is a Multilink, in which case the item's property list must
+ match the filterspec list.
'''
cn = self.classname
LINK = 0
MULTILINK = 1
STRING = 2
+ DATE = 3
+ INTERVAL = 4
OTHER = 6
+
+ timezone = self.db.getUserTimezone()
for k, v in filterspec.items():
propclass = props[k]
if isinstance(propclass, Link):
u = []
link_class = self.db.classes[propclass.classname]
for entry in v:
- if entry == '-1': entry = None
+ # the value -1 is a special "not set" sentinel
+ if entry == '-1':
+ entry = None
elif not num_re.match(entry):
try:
entry = link_class.lookup(entry)
l.append((LINK, k, u))
elif isinstance(propclass, Multilink):
- if type(v) is not type([]):
+ # the value -1 is a special "not set" sentinel
+ if v in ('-1', ['-1']):
+ v = []
+ elif type(v) is not type([]):
v = [v]
+
# replace key values with node ids
u = []
link_class = self.db.classes[propclass.classname]
raise ValueError, 'new property "%s": %s not a %s'%(
k, entry, self.properties[k].classname)
u.append(entry)
+ u.sort()
l.append((MULTILINK, k, u))
- elif isinstance(propclass, String):
- # simple glob searching
- v = re.sub(r'([\|\{\}\\\.\+\[\]\(\)])', r'\\\1', v)
- v = v.replace('?', '.')
- v = v.replace('*', '.*?')
- l.append((STRING, k, re.compile(v, re.I)))
+ elif isinstance(propclass, String) and k != 'id':
+ if type(v) is not type([]):
+ v = [v]
+ m = []
+ for v in v:
+ # simple glob searching
+ v = re.sub(r'([\|\{\}\\\.\+\[\]\(\)])', r'\\\1', v)
+ v = v.replace('?', '.')
+ v = v.replace('*', '.*?')
+ m.append(v)
+ m = re.compile('(%s)'%('|'.join(m)), re.I)
+ l.append((STRING, k, m))
+ elif isinstance(propclass, Date):
+ try:
+ date_rng = Range(v, date.Date, offset=timezone)
+ l.append((DATE, k, date_rng))
+ except ValueError:
+ # If range creation fails - ignore that search parameter
+ pass
+ elif isinstance(propclass, Interval):
+ try:
+ intv_rng = Range(v, date.Interval)
+ l.append((INTERVAL, k, intv_rng))
+ except ValueError:
+ # If range creation fails - ignore that search parameter
+ pass
+
elif isinstance(propclass, Boolean):
if type(v) is type(''):
bv = v.lower() in ('yes', 'true', 'on', '1')
cldb = self.db.getclassdb(cn)
try:
# TODO: only full-scan once (use items())
- for nodeid in self.db.getnodeids(cn, cldb):
+ for nodeid in self.getnodeids(cldb):
node = self.db.getnode(cn, nodeid, cldb)
if node.has_key(self.db.RETIRED_FLAG):
continue
# apply filter
for t, k, v in filterspec:
+ # handle the id prop
+ if k == 'id' and v == nodeid:
+ continue
+
# make sure the node has the property
if not node.has_key(k):
# this node doesn't have this property, so reject it
# filterspec aren't in this node's property, then skip
# it
have = node[k]
+ # check for matching the absence of multilink values
+ if not v and have:
+ break
+
+                    # otherwise, make sure this node has each of the
+ # required values
for want in v:
if want not in have:
break
continue
break
elif t == STRING:
+ if node[k] is None:
+ break
# RE search
- if node[k] is None or not v.search(node[k]):
+ if not v.search(node[k]):
break
+ elif t == DATE or t == INTERVAL:
+ if node[k] is None:
+ break
+ if v.to_value:
+ if not (v.from_value <= node[k] and v.to_value >= node[k]):
+ break
+ else:
+ if not (v.from_value <= node[k]):
+ break
elif t == OTHER:
# straight value comparison for the other types
if node[k] != v:
if isinstance(propclass, String):
# clean up the strings
if av and av[0] in string.uppercase:
- av = an[prop] = av.lower()
+ av = av.lower()
if bv and bv[0] in string.uppercase:
- bv = bn[prop] = bv.lower()
+ bv = bv.lower()
if (isinstance(propclass, String) or
isinstance(propclass, Date)):
# it might be a string that's really an integer
r = cmp(bv, av)
if r != 0: return r
- # Multilink properties are sorted according to how many
- # links are present.
- elif isinstance(propclass, Multilink):
- if dir == '+':
- r = cmp(len(av), len(bv))
- if r != 0: return r
- elif dir == '-':
- r = cmp(len(bv), len(av))
- if r != 0: return r
- elif isinstance(propclass, Number) or isinstance(propclass, Boolean):
+ else:
+ # all other types just compare
if dir == '+':
r = cmp(av, bv)
elif dir == '-':
r = cmp(bv, av)
+ if r != 0: return r
# end for dir, prop in sort, group:
# if all else fails, compare the ids
d['id'] = String()
d['creation'] = hyperdb.Date()
d['activity'] = hyperdb.Date()
- # can't be a link to user because the user might have been
- # retired since the journal entry was created
- d['creator'] = hyperdb.String()
+ d['creator'] = hyperdb.Link('user')
return d
def addprop(self, **properties):
for react in self.reactors[action]:
react(self.db, self, nodeid, oldvalues)
-class FileClass(Class):
+class FileClass(Class, hyperdb.FileClass):
'''This class defines a large chunk of data. To support this, it has a
mandatory String property "content" which is typically saved off
externally to the hyperdb.
default_mime_type = 'text/plain'
def create(self, **propvalues):
- ''' snaffle the file propvalue and store in a file
+ ''' Snarf the "content" propvalue and store in a file
'''
+ # we need to fire the auditors now, or the content property won't
+ # be in propvalues for the auditors to play with
+ self.fireAuditors('create', None, propvalues)
+
+ # now remove the content property so it's not stored in the db
content = propvalues['content']
del propvalues['content']
- newid = Class.create(self, **propvalues)
+
+ # do the database create
+ newid = Class.create_inner(self, **propvalues)
+
+ # fire reactors
+ self.fireReactors('create', newid, None)
+
+ # store off the content as a file
self.db.storefile(self.classname, newid, None, content)
return newid
return newid
def get(self, nodeid, propname, default=_marker, cache=1):
- ''' trap the content propname and get it from the file
- '''
+ ''' Trap the content propname and get it from the file
- poss_msg = 'Possibly a access right configuration problem.'
+ 'cache' exists for backwards compatibility, and is not used.
+ '''
+ poss_msg = 'Possibly an access right configuration problem.'
if propname == 'content':
try:
return self.db.getfile(self.classname, nodeid, None)
except IOError, (strerror):
- # BUG: by catching this we donot see an error in the log.
+            # XXX by catching this we do not see an error in the log.
return 'ERROR reading file: %s%s\n%s\n%s'%(
self.classname, nodeid, poss_msg, strerror)
if default is not _marker:
- return Class.get(self, nodeid, propname, default, cache=cache)
+ return Class.get(self, nodeid, propname, default)
else:
- return Class.get(self, nodeid, propname, cache=cache)
+ return Class.get(self, nodeid, propname)
def getprops(self, protected=1):
''' In addition to the actual properties on the node, these methods
modified.
'''
d = Class.getprops(self, protected=protected).copy()
- if protected:
- d['content'] = hyperdb.String()
+ d['content'] = hyperdb.String()
return d
def index(self, nodeid):