index 26f78b61f6450afa6271cfc3a710e652dd3d14b1..3ba789897e29cc8a92ce354fec182c09e25e0a52 100644 (file)
__docformat__ = 'restructuredtext'
# standard python modules
-import sys, os, time, re, errno, weakref, copy, logging
+import sys, os, time, re, errno, weakref, copy, logging, datetime
# roundup modules
from roundup import hyperdb, date, password, roundupdb, security, support
from roundup.support import reversed
from roundup.i18n import _
+
# support
-from blobfiles import FileStorage
+from roundup.backends.blobfiles import FileStorage
try:
- from indexer_xapian import Indexer
+ from roundup.backends.indexer_xapian import Indexer
except ImportError:
- from indexer_rdbms import Indexer
-from sessions_rdbms import Sessions, OneTimeKeys
+ from roundup.backends.indexer_rdbms import Indexer
+from roundup.backends.sessions_rdbms import Sessions, OneTimeKeys
from roundup.date import Range
+from roundup.backends.back_anydbm import compile_expression
+
+
# dummy value meaning "argument not passed"
_marker = []
# assume it's a number returned from the db API
return int(value)
+def date_to_hyperdb_value(d):
+    """ convert date d to a roundup date
+
+    A datetime.datetime is handed to date.Date directly; any other
+    value (e.g. a SQL timestamp string) is stringified, with the
+    space between date and time replaced by '.' to match roundup's
+    own date syntax.
+    """
+    if isinstance (d, datetime.datetime):
+        return date.Date(d)
+    return date.Date (str(d).replace(' ', '.'))
+
+
def connection_dict(config, dbnamestr=None):
""" Used by Postgresql and MySQL to detemine the keyword args for
opening the database connection."""
d[name] = config[cvar]
return d
+
+class IdListOptimizer:
+    """ To prevent flooding the SQL parser of the underlying
+    db engine with "x IN (1, 2, 3, ..., <large number>)" collapses
+    these cases to "x BETWEEN 1 AND <large number>".
+    """
+
+    def __init__(self):
+        # closed intervals [lo, hi] of consecutive ids
+        self.ranges = []
+        # ids that are not (yet) part of any range
+        self.singles = []
+
+    def append(self, nid):
+        """ Invariant: nid are ordered ascending """
+        # first try to extend the most recently started range
+        if self.ranges:
+            last = self.ranges[-1]
+            if last[1] == nid-1:
+                last[1] = nid
+                return
+        # a single followed by its direct successor becomes a new range
+        if self.singles:
+            last = self.singles[-1]
+            if last == nid-1:
+                self.singles.pop()
+                self.ranges.append([last, nid])
+                return
+        self.singles.append(nid)
+
+    def where(self, field, placeholder):
+        """ Build an SQL condition for field over the collected ids.
+
+        Returns a (condition-string, argument-list) pair; placeholder
+        is the paramstyle marker of the backend (e.g. '%s' or '?').
+        An empty optimizer yields the always-false condition "(1=0)"
+        so callers need not special-case it.
+        """
+        ranges = self.ranges
+        singles = self.singles
+
+        if not singles and not ranges: return "(1=0)", []
+
+        if ranges:
+            between = '%s BETWEEN %s AND %s' % (
+                field, placeholder, placeholder)
+            stmnt = [between] * len(ranges)
+        else:
+            stmnt = []
+        if singles:
+            stmnt.append('%s in (%s)' % (
+                field, ','.join([placeholder]*len(singles))))
+
+        # args: the flattened range bounds first, then the single ids,
+        # matching the order of the OR-joined clauses above
+        return '(%s)' % ' OR '.join(stmnt), sum(ranges, []) + singles
+
+    def __str__(self):
+        return "ranges: %r / singles: %r" % (self.ranges, self.singles)
+
+
class Database(FileStorage, hyperdb.Database, roundupdb.Database):
""" Wrapper around an SQL database that presents a hyperdb interface.
# keep a cache of the N most recently retrieved rows of any kind
# (classname, nodeid) = row
self.cache_size = config.RDBMS_CACHE_SIZE
- self.cache = {}
- self.cache_lru = []
+ self.clearCache()
self.stats = {'cache_hits': 0, 'cache_misses': 0, 'get_items': 0,
'filtering': 0}
"""
raise NotImplemented
-    def sql(self, sql, args=None):
+    def sql(self, sql, args=None, cursor=None):
         """ Execute the sql with the optional args.
+
+            If a cursor is passed, execute on it instead of the
+            connection-wide default cursor (self.cursor).
         """
         self.log_debug('SQL %r %r'%(sql, args))
+        # fall back to the db-wide cursor when none is supplied
+        if not cursor:
+            cursor = self.cursor
         if args:
-            self.cursor.execute(sql, args)
+            cursor.execute(sql, args)
         else:
-            self.cursor.execute(sql)
+            cursor.execute(sql)
def sql_fetchone(self):
""" Fetch a single row. If there's nothing to fetch, return None.
"""
return self.cursor.fetchall()
+    def sql_fetchiter(self):
+        """ Fetch all rows, one at a time, as a generator.
+
+            Avoids materialising the whole result set the way
+            fetchall() would.
+        """
+        while True:
+            row = self.cursor.fetchone()
+            if not row: break
+            yield row
+
def sql_stringquote(self, value):
""" Quote the string so it's safe to put in the 'sql quotes'
"""
# handle changes in the schema
tables = self.database_schema['tables']
- for classname, spec in self.classes.items():
- if tables.has_key(classname):
+ for classname, spec in self.classes.iteritems():
+ if classname in tables:
dbspec = tables[classname]
if self.update_class(spec, dbspec):
tables[classname] = spec.schema()
tables[classname] = spec.schema()
save = 1
- for classname, spec in tables.items():
- if not self.classes.has_key(classname):
+ for classname, spec in list(tables.items()):
+ if classname not in self.classes:
self.drop_class(classname, tables[classname])
del tables[classname]
save = 1
def fix_version_4_tables(self):
# note this is an explicit call now
c = self.cursor
- for cn, klass in self.classes.items():
+ for cn, klass in self.classes.iteritems():
c.execute('select id from _%s where __retired__<>0'%(cn,))
for (id,) in c.fetchall():
c.execute('update _%s set __retired__=%s where id=%s'%(cn,
"""Get current journal table contents, drop the table and re-create"""
c = self.cursor
cols = ','.join('nodeid date tag action params'.split())
- for klass in self.classes.values():
+ for klass in self.classes.itervalues():
# slurp and drop
sql = 'select %s from %s__journal order by date'%(cols,
klass.classname)
"""Get current Class tables that contain String properties, and
convert the VARCHAR columns to TEXT"""
c = self.cursor
- for klass in self.classes.values():
+ for klass in self.classes.itervalues():
# slurp and drop
- cols, mls = self.determine_columns(klass.properties.items())
+ cols, mls = self.determine_columns(list(klass.properties.iteritems()))
scols = ','.join([i[0] for i in cols])
sql = 'select id,%s from _%s'%(scols, klass.classname)
c.execute(sql)
if classname:
classes = [self.getclass(classname)]
else:
- classes = self.classes.values()
+ classes = list(self.classes.itervalues())
for klass in classes:
if show_progress:
for nodeid in support.Progress('Reindex %s'%klass.classname,
if issubclass(propclass, k):
return v
- raise ValueError, '%r is not a hyperdb property class' % propclass
+ raise ValueError('%r is not a hyperdb property class' % propclass)
def determine_columns(self, properties):
""" Figure the column names and multilink properties from the spec
continue
if isinstance(prop, type('')):
- raise ValueError, "string property spec!"
+ raise ValueError("string property spec!")
#and prop.find('Multilink') != -1:
#mls.append(col)
If 'force' is true, update the database anyway.
"""
- new_has = spec.properties.has_key
new_spec = spec.schema()
new_spec[1].sort()
old_spec[1].sort()
# no changes
return 0
- logger = logging.getLogger('hyperdb')
+ if not self.config.RDBMS_ALLOW_ALTER:
+ raise DatabaseError(_('ALTER operation disallowed: %r -> %r.'%(old_spec, new_spec)))
+
+ logger = logging.getLogger('roundup.hyperdb')
logger.info('update_class %s'%spec.classname)
logger.debug('old_spec %r'%(old_spec,))
old_has = {}
for name, prop in old_spec[1]:
old_has[name] = 1
- if new_has(name):
+ if name in spec.properties:
continue
if prop.find('Multilink to') != -1:
sql = 'alter table _%s drop column _%s'%(spec.classname, name)
self.sql(sql)
- old_has = old_has.has_key
# if we didn't remove the key prop just then, but the key prop has
# changed, we still need to remove the old index
- if keyprop_changes.has_key('remove'):
+ if 'remove' in keyprop_changes:
self.drop_class_table_key_index(spec.classname,
keyprop_changes['remove'])
# add new columns
for propname, prop in new_spec[1]:
- if old_has(propname):
+ if propname in old_has:
continue
prop = spec.properties[propname]
if isinstance(prop, Multilink):
# if we didn't add the key prop just then, but the key prop has
# changed, we still need to add the new index
- if keyprop_changes.has_key('add'):
+ if 'add' in keyprop_changes:
self.create_class_table_key_index(spec.classname,
keyprop_changes['add'])
"""Figure out the columns from the spec and also add internal columns
"""
- cols, mls = self.determine_columns(spec.properties.items())
+ cols, mls = self.determine_columns(list(spec.properties.iteritems()))
# add on our special columns
cols.append(('id', 'INTEGER PRIMARY KEY'))
def create_class(self, spec):
""" Create a database table according to the given spec.
"""
+
+ if not self.config.RDBMS_ALLOW_CREATE:
+ raise DatabaseError(_('CREATE operation disallowed: "%s".'%spec.classname))
+
cols, mls = self.create_class_table(spec)
self.create_journal_table(spec)
Drop the journal and multilink tables too.
"""
+
+ if not self.config.RDBMS_ALLOW_DROP:
+ raise DatabaseError(_('DROP operation disallowed: "%s".'%cn))
+
properties = spec[1]
# figure the multilinks
mls = []
def __getattr__(self, classname):
""" A convenient way of calling self.getclass(classname).
"""
- if self.classes.has_key(classname):
+ if classname in self.classes:
return self.classes[classname]
- raise AttributeError, classname
+ raise AttributeError(classname)
def addclass(self, cl):
""" Add a Class to the hyperdatabase.
"""
cn = cl.classname
- if self.classes.has_key(cn):
- raise ValueError, cn
+ if cn in self.classes:
+ raise ValueError(cn)
self.classes[cn] = cl
# add default Edit and View permissions
def getclasses(self):
""" Return a list of the names of all existing classes.
"""
- l = self.classes.keys()
- l.sort()
- return l
+ return sorted(self.classes)
def getclass(self, classname):
"""Get the Class object representing a particular class.
try:
return self.classes[classname]
except KeyError:
- raise KeyError, 'There is no class called "%s"'%classname
+ raise KeyError('There is no class called "%s"'%classname)
def clear(self):
"""Delete all database contents.
Note: I don't commit here, which is different behaviour to the
"nuke from orbit" behaviour in the dbs.
"""
- logging.getLogger('hyperdb').info('clear')
- for cn in self.classes.keys():
+ logging.getLogger('roundup.hyperdb').info('clear')
+ for cn in self.classes:
sql = 'delete from _%s'%cn
self.sql(sql)
if issubclass(propklass, k):
return v
- raise ValueError, '%r is not a hyperdb property class' % propklass
+ raise ValueError('%r is not a hyperdb property class' % propklass)
+
+    def _cache_del(self, key):
+        """ Remove key from the node cache and its LRU bookkeeping. """
+        del self.cache[key]
+        self.cache_lru.remove(key)
+
+    def _cache_refresh(self, key):
+        """ Mark an already-cached key as most recently used. """
+        self.cache_lru.remove(key)
+        self.cache_lru.insert(0, key)
+
+    def _cache_save(self, key, node):
+        """ Cache node under key, evicting the least recently used
+            entry once the cache exceeds self.cache_size.
+        """
+        self.cache[key] = node
+        # update the LRU
+        self.cache_lru.insert(0, key)
+        if len(self.cache_lru) > self.cache_size:
+            del self.cache[self.cache_lru.pop()]
def addnode(self, classname, nodeid, node):
""" Add the specified node to its class's db.
# determine the column definitions and multilink tables
cl = self.classes[classname]
- cols, mls = self.determine_columns(cl.properties.items())
+ cols, mls = self.determine_columns(list(cl.properties.iteritems()))
# we'll be supplied these props if we're doing an import
values = node.copy()
- if not values.has_key('creator'):
+ if 'creator' not in values:
# add in the "calculated" properties (dupe so we don't affect
# calling code's node assumptions)
values['creation'] = values['activity'] = date.Date()
del props['id']
# default the non-multilink columns
- for col, prop in props.items():
- if not values.has_key(col):
+ for col, prop in props.iteritems():
+ if col not in values:
if isinstance(prop, Multilink):
values[col] = []
else:
# clear this node out of the cache if it's in there
key = (classname, nodeid)
- if self.cache.has_key(key):
- del self.cache[key]
- self.cache_lru.remove(key)
+ if key in self.cache:
+ self._cache_del(key)
# figure the values to insert
vals = []
# clear this node out of the cache if it's in there
key = (classname, nodeid)
- if self.cache.has_key(key):
- del self.cache[key]
- self.cache_lru.remove(key)
+ if key in self.cache:
+ self._cache_del(key)
cl = self.classes[classname]
props = cl.getprops()
cols = []
mls = []
# add the multilinks separately
- for col in values.keys():
+ for col in values:
prop = props[col]
if isinstance(prop, Multilink):
mls.append(col)
self.sql(sql, (entry, nodeid))
# we have multilink changes to apply
- for col, (add, remove) in multilink_changes.items():
+ for col, (add, remove) in multilink_changes.iteritems():
tn = '%s_%s'%(classname, col)
if add:
sql = 'insert into %s (nodeid, linkid) values (%s,%s)'%(tn,
sql_to_hyperdb_value = {
hyperdb.String : str,
- hyperdb.Date : lambda x:date.Date(str(x).replace(' ', '.')),
+ hyperdb.Date : date_to_hyperdb_value,
# hyperdb.Link : int, # XXX numeric ids
hyperdb.Link : str,
hyperdb.Interval : date.Interval,
if issubclass(propklass, k):
return v
- raise ValueError, '%r is not a hyperdb property class' % propklass
+ raise ValueError('%r is not a hyperdb property class' % propklass)
def getnode(self, classname, nodeid):
""" Get a node from the database.
"""
# see if we have this node cached
key = (classname, nodeid)
- if self.cache.has_key(key):
+ if key in self.cache:
# push us back to the top of the LRU
- self.cache_lru.remove(key)
- self.cache_lru.insert(0, key)
+ self._cache_refresh(key)
if __debug__:
self.stats['cache_hits'] += 1
# return the cached information
# figure the columns we're fetching
cl = self.classes[classname]
- cols, mls = self.determine_columns(cl.properties.items())
+ cols, mls = self.determine_columns(list(cl.properties.iteritems()))
scols = ','.join([col for col,dt in cols])
# perform the basic property fetch
values = self.sql_fetchone()
if values is None:
- raise IndexError, 'no such %s node %s'%(classname, nodeid)
+ raise IndexError('no such %s node %s'%(classname, nodeid))
# make up the node
node = {}
value = self.to_hyperdb_value(props[name].__class__)(value)
node[name] = value
-
- # now the multilinks
- for col in mls:
- # get the link ids
- sql = 'select linkid from %s_%s where nodeid=%s'%(classname, col,
- self.arg)
- self.sql(sql, (nodeid,))
- # extract the first column from the result
- # XXX numeric ids
- items = [int(x[0]) for x in self.cursor.fetchall()]
- items.sort ()
- node[col] = [str(x) for x in items]
-
# save off in the cache
key = (classname, nodeid)
- self.cache[key] = node
- # update the LRU
- self.cache_lru.insert(0, key)
- if len(self.cache_lru) > self.cache_size:
- del self.cache[self.cache_lru.pop()]
+ self._cache_save(key, node)
if __debug__:
self.stats['get_items'] += (time.time() - start_t)
"""Remove a node from the database. Called exclusively by the
destroy() method on Class.
"""
- logging.getLogger('hyperdb').info('destroynode %s%s'%(classname, nodeid))
+ logging.getLogger('roundup.hyperdb').info('destroynode %s%s'%(
+ classname, nodeid))
# make sure the node exists
if not self.hasnode(classname, nodeid):
- raise IndexError, '%s has no node %s'%(classname, nodeid)
+ raise IndexError('%s has no node %s'%(classname, nodeid))
# see if we have this node cached
- if self.cache.has_key((classname, nodeid)):
+ if (classname, nodeid) in self.cache:
del self.cache[(classname, nodeid)]
# see if there's any obvious commit actions that we should get rid of
# remove from multilnks
cl = self.getclass(classname)
- x, mls = self.determine_columns(cl.properties.items())
+ x, mls = self.determine_columns(list(cl.properties.iteritems()))
for col in mls:
# get the link ids
sql = 'delete from %s_%s where nodeid=%s'%(classname, col, self.arg)
"""
# If this node is in the cache, then we do not need to go to
# the database. (We don't consider this an LRU hit, though.)
- if self.cache.has_key((classname, nodeid)):
+ if (classname, nodeid) in self.cache:
# Return 1, not True, to match the type of the result of
# the SQL operation below.
return 1
"""Convert the journal params values into safely repr'able and
eval'able values."""
properties = self.getclass(classname).getprops()
- for param, value in params.items():
+ for param, value in params.iteritems():
if not value:
continue
property = properties[param]
"""
# make sure the node exists
if not self.hasnode(classname, nodeid):
- raise IndexError, '%s has no node %s'%(classname, nodeid)
+ raise IndexError('%s has no node %s'%(classname, nodeid))
cols = ','.join('nodeid date tag action params'.split())
journal = self.load_journal(classname, cols, nodeid)
for nodeid, date_stamp, user, action, params in journal:
params = eval(params)
if isinstance(params, type({})):
- for param, value in params.items():
+ for param, value in params.iteritems():
if not value:
continue
property = properties.get(param, None)
date_stamp = self.to_sql_value(Date)(pack_before)
# do the delete
- for classname in self.classes.keys():
+ for classname in self.classes:
sql = "delete from %s__journal where date<%s and "\
"action<>'create'"%(classname, self.arg)
self.sql(sql, (date_stamp,))
def sql_commit(self, fail_ok=False):
""" Actually commit to the database.
"""
- logging.getLogger('hyperdb').info('commit')
+ logging.getLogger('roundup.hyperdb').info('commit')
self.conn.commit()
# clear out the transactions
self.transactions = []
+ # clear the cache: Don't carry over cached values from one
+ # transaction to the next (there may be other changes from other
+ # transactions)
+ self.clearCache()
+
def sql_rollback(self):
self.conn.rollback()
Undo all the changes made since the database was opened or the last
commit() or rollback() was performed.
"""
- logging.getLogger('hyperdb').info('rollback')
+ logging.getLogger('roundup.hyperdb').info('rollback')
self.sql_rollback()
self.clearCache()
def sql_close(self):
- logging.getLogger('hyperdb').info('close')
+ logging.getLogger('roundup.hyperdb').info('close')
self.conn.close()
def close(self):
""" A dumpable version of the schema that we can store in the
database
"""
- return (self.key, [(x, repr(y)) for x,y in self.properties.items()])
+ return (self.key, [(x, repr(y)) for x,y in self.properties.iteritems()])
def enableJournalling(self):
"""Turn journalling on for this class
def create_inner(self, **propvalues):
""" Called by create, in-between the audit and react calls.
"""
- if propvalues.has_key('id'):
- raise KeyError, '"id" is reserved'
+ if 'id' in propvalues:
+ raise KeyError('"id" is reserved')
if self.db.journaltag is None:
- raise DatabaseError, _('Database open read-only')
+ raise DatabaseError(_('Database open read-only'))
- if propvalues.has_key('creator') or propvalues.has_key('actor') or \
- propvalues.has_key('creation') or propvalues.has_key('activity'):
- raise KeyError, '"creator", "actor", "creation" and '\
- '"activity" are reserved'
+ if ('creator' in propvalues or 'actor' in propvalues or
+ 'creation' in propvalues or 'activity' in propvalues):
+ raise KeyError('"creator", "actor", "creation" and '
+ '"activity" are reserved')
# new node's id
newid = self.db.newid(self.classname)
# validate propvalues
num_re = re.compile('^\d+$')
- for key, value in propvalues.items():
+ for key, value in propvalues.iteritems():
if key == self.key:
try:
self.lookup(value)
except KeyError:
pass
else:
- raise ValueError, 'node with key "%s" exists'%value
+ raise ValueError('node with key "%s" exists'%value)
# try to handle this property
try:
prop = self.properties[key]
except KeyError:
- raise KeyError, '"%s" has no property "%s"'%(self.classname,
- key)
+ raise KeyError('"%s" has no property "%s"'%(self.classname,
+ key))
if value is not None and isinstance(prop, Link):
if type(value) != type(''):
- raise ValueError, 'link value must be String'
+ raise ValueError('link value must be String')
link_class = self.properties[key].classname
# if it isn't a number, it's a key
if not num_re.match(value):
try:
value = self.db.classes[link_class].lookup(value)
except (TypeError, KeyError):
- raise IndexError, 'new property "%s": %s not a %s'%(
- key, value, link_class)
+ raise IndexError('new property "%s": %s not a %s'%(
+ key, value, link_class))
elif not self.db.getclass(link_class).hasnode(value):
- raise IndexError, '%s has no node %s'%(link_class, value)
+ raise IndexError('%s has no node %s'%(link_class,
+ value))
# save off the value
propvalues[key] = value
if value is None:
value = []
if not hasattr(value, '__iter__'):
- raise TypeError, 'new property "%s" not an iterable of ids'%key
-
+ raise TypeError('new property "%s" not an iterable of ids'%key)
# clean up and validate the list of links
link_class = self.properties[key].classname
l = []
for entry in value:
if type(entry) != type(''):
- raise ValueError, '"%s" multilink value (%r) '\
- 'must contain Strings'%(key, value)
+ raise ValueError('"%s" multilink value (%r) '
+ 'must contain Strings'%(key, value))
# if it isn't a number, it's a key
if not num_re.match(entry):
try:
entry = self.db.classes[link_class].lookup(entry)
except (TypeError, KeyError):
- raise IndexError, 'new property "%s": %s not a %s'%(
- key, entry, self.properties[key].classname)
+ raise IndexError('new property "%s": %s not a %s'%(
+ key, entry, self.properties[key].classname))
l.append(entry)
value = l
propvalues[key] = value
# handle additions
for nodeid in value:
if not self.db.getclass(link_class).hasnode(nodeid):
- raise IndexError, '%s has no node %s'%(link_class,
- nodeid)
+ raise IndexError('%s has no node %s'%(link_class,
+ nodeid))
# register the link with the newly linked node
if self.do_journal and self.properties[key].do_journal:
self.db.addjournal(link_class, nodeid, 'link',
elif isinstance(prop, String):
if type(value) != type('') and type(value) != type(u''):
- raise TypeError, 'new property "%s" not a string'%key
+ raise TypeError('new property "%s" not a string'%key)
if prop.indexme:
self.db.indexer.add_text((self.classname, newid, key),
value)
elif isinstance(prop, Password):
if not isinstance(value, password.Password):
- raise TypeError, 'new property "%s" not a Password'%key
+ raise TypeError('new property "%s" not a Password'%key)
elif isinstance(prop, Date):
if value is not None and not isinstance(value, date.Date):
- raise TypeError, 'new property "%s" not a Date'%key
+ raise TypeError('new property "%s" not a Date'%key)
elif isinstance(prop, Interval):
if value is not None and not isinstance(value, date.Interval):
- raise TypeError, 'new property "%s" not an Interval'%key
+ raise TypeError('new property "%s" not an Interval'%key)
elif value is not None and isinstance(prop, Number):
try:
float(value)
except ValueError:
- raise TypeError, 'new property "%s" not numeric'%key
+ raise TypeError('new property "%s" not numeric'%key)
elif value is not None and isinstance(prop, Boolean):
try:
int(value)
except ValueError:
- raise TypeError, 'new property "%s" not boolean'%key
+ raise TypeError('new property "%s" not boolean'%key)
# make sure there's data where there needs to be
- for key, prop in self.properties.items():
- if propvalues.has_key(key):
+ for key, prop in self.properties.iteritems():
+ if key in propvalues:
continue
if key == self.key:
- raise ValueError, 'key property "%s" is required'%key
+ raise ValueError('key property "%s" is required'%key)
if isinstance(prop, Multilink):
propvalues[key] = []
else:
# get the node's dict
d = self.db.getnode(self.classname, nodeid)
-
- if propname == 'creation':
- if d.has_key('creation'):
- return d['creation']
- else:
- return date.Date()
- if propname == 'activity':
- if d.has_key('activity'):
- return d['activity']
- else:
- return date.Date()
- if propname == 'creator':
- if d.has_key('creator'):
- return d['creator']
- else:
- return self.db.getuid()
- if propname == 'actor':
- if d.has_key('actor'):
- return d['actor']
- else:
- return self.db.getuid()
-
- # get the property (raises KeyErorr if invalid)
+ # handle common case -- that property is in dict -- first
+ # if None and one of creator/creation actor/activity return None
+ if propname in d:
+ r = d [propname]
+ # return copy of our list
+ if isinstance (r, list):
+ return r[:]
+ if r is not None:
+ return r
+ elif propname in ('creation', 'activity', 'creator', 'actor'):
+ return r
+
+ # propname not in d:
+ if propname == 'creation' or propname == 'activity':
+ return date.Date()
+ if propname == 'creator' or propname == 'actor':
+ return self.db.getuid()
+
+ # get the property (raises KeyError if invalid)
prop = self.properties[propname]
- # XXX may it be that propname is valid property name
- # (above error is not raised) and not d.has_key(propname)???
- if (not d.has_key(propname)) or (d[propname] is None):
+ # lazy evaluation of Multilink
+ if propname not in d and isinstance(prop, Multilink):
+ sql = 'select linkid from %s_%s where nodeid=%s'%(self.classname,
+ propname, self.db.arg)
+ self.db.sql(sql, (nodeid,))
+ # extract the first column from the result
+ # XXX numeric ids
+ items = [int(x[0]) for x in self.db.cursor.fetchall()]
+ items.sort ()
+ d[propname] = [str(x) for x in items]
+
+ # handle there being no value in the table for the property
+ if propname not in d or d[propname] is None:
if default is _marker:
if isinstance(prop, Multilink):
return []
if not propvalues:
return propvalues
- if propvalues.has_key('creation') or propvalues.has_key('creator') or \
- propvalues.has_key('actor') or propvalues.has_key('activity'):
- raise KeyError, '"creation", "creator", "actor" and '\
- '"activity" are reserved'
+ if ('creator' in propvalues or 'actor' in propvalues or
+ 'creation' in propvalues or 'activity' in propvalues):
+ raise KeyError('"creator", "actor", "creation" and '
+ '"activity" are reserved')
- if propvalues.has_key('id'):
- raise KeyError, '"id" is reserved'
+ if 'id' in propvalues:
+ raise KeyError('"id" is reserved')
if self.db.journaltag is None:
- raise DatabaseError, _('Database open read-only')
+ raise DatabaseError(_('Database open read-only'))
node = self.db.getnode(self.classname, nodeid)
if self.is_retired(nodeid):
- raise IndexError, 'Requested item is retired'
+ raise IndexError('Requested item is retired')
num_re = re.compile('^\d+$')
# make a copy of the values dictionary - we'll modify the contents
# for the Database layer to do its stuff
multilink_changes = {}
- for propname, value in propvalues.items():
+ for propname, value in list(propvalues.items()):
# check to make sure we're not duplicating an existing key
if propname == self.key and node[propname] != value:
try:
except KeyError:
pass
else:
- raise ValueError, 'node with key "%s" exists'%value
+ raise ValueError('node with key "%s" exists'%value)
# this will raise the KeyError if the property isn't valid
# ... we don't use getprops() here because we only care about
try:
prop = self.properties[propname]
except KeyError:
- raise KeyError, '"%s" has no property named "%s"'%(
- self.classname, propname)
+ raise KeyError('"%s" has no property named "%s"'%(
+ self.classname, propname))
# if the value's the same as the existing value, no sense in
# doing anything
link_class = prop.classname
# if it isn't a number, it's a key
if value is not None and not isinstance(value, type('')):
- raise ValueError, 'property "%s" link value be a string'%(
- propname)
+ raise ValueError('property "%s" link value be a string'%(
+ propname))
if isinstance(value, type('')) and not num_re.match(value):
try:
value = self.db.classes[link_class].lookup(value)
except (TypeError, KeyError):
- raise IndexError, 'new property "%s": %s not a %s'%(
- propname, value, prop.classname)
+ raise IndexError('new property "%s": %s not a %s'%(
+ propname, value, prop.classname))
if (value is not None and
not self.db.getclass(link_class).hasnode(value)):
- raise IndexError, '%s has no node %s'%(link_class, value)
+ raise IndexError('%s has no node %s'%(link_class,
+ value))
if self.do_journal and prop.do_journal:
# register the unlink with the old linked node
if value is None:
value = []
if not hasattr(value, '__iter__'):
- raise TypeError, 'new property "%s" not an iterable of'\
- ' ids'%propname
+ raise TypeError('new property "%s" not an iterable of'
+ ' ids'%propname)
link_class = self.properties[propname].classname
l = []
for entry in value:
# if it isn't a number, it's a key
if type(entry) != type(''):
- raise ValueError, 'new property "%s" link value ' \
- 'must be a string'%propname
+ raise ValueError('new property "%s" link value '
+ 'must be a string'%propname)
if not num_re.match(entry):
try:
entry = self.db.classes[link_class].lookup(entry)
except (TypeError, KeyError):
- raise IndexError, 'new property "%s": %s not a %s'%(
+ raise IndexError('new property "%s": %s not a %s'%(
propname, entry,
- self.properties[propname].classname)
+ self.properties[propname].classname))
l.append(entry)
value = l
propvalues[propname] = value
remove = []
# handle removals
- if node.has_key(propname):
+ if propname in node:
l = node[propname]
else:
l = []
# handle additions
for id in value:
- if not self.db.getclass(link_class).hasnode(id):
- raise IndexError, '%s has no node %s'%(link_class, id)
if id in l:
continue
+ # We can safely check this condition after
+ # checking that this is an addition to the
+ # multilink since the condition was checked for
+ # existing entries at the point they were added to
+ # the multilink. Since the hasnode call will
+ # result in a SQL query, it is more efficient to
+ # avoid the check if possible.
+ if not self.db.getclass(link_class).hasnode(id):
+ raise IndexError('%s has no node %s'%(link_class,
+ id))
# register the link with the newly linked node
if self.do_journal and self.properties[propname].do_journal:
self.db.addjournal(link_class, id, 'link',
elif isinstance(prop, String):
if value is not None and type(value) != type('') and type(value) != type(u''):
- raise TypeError, 'new property "%s" not a string'%propname
+ raise TypeError('new property "%s" not a string'%propname)
if prop.indexme:
if value is None: value = ''
self.db.indexer.add_text((self.classname, nodeid, propname),
elif isinstance(prop, Password):
if not isinstance(value, password.Password):
- raise TypeError, 'new property "%s" not a Password'%propname
+ raise TypeError('new property "%s" not a Password'%propname)
propvalues[propname] = value
elif value is not None and isinstance(prop, Date):
if not isinstance(value, date.Date):
- raise TypeError, 'new property "%s" not a Date'% propname
+ raise TypeError('new property "%s" not a Date'% propname)
propvalues[propname] = value
elif value is not None and isinstance(prop, Interval):
if not isinstance(value, date.Interval):
- raise TypeError, 'new property "%s" not an '\
- 'Interval'%propname
+ raise TypeError('new property "%s" not an '
+ 'Interval'%propname)
propvalues[propname] = value
elif value is not None and isinstance(prop, Number):
try:
float(value)
except ValueError:
- raise TypeError, 'new property "%s" not numeric'%propname
+ raise TypeError('new property "%s" not numeric'%propname)
elif value is not None and isinstance(prop, Boolean):
try:
int(value)
except ValueError:
- raise TypeError, 'new property "%s" not boolean'%propname
+ raise TypeError('new property "%s" not boolean'%propname)
# nothing to do?
if not propvalues:
methods, and other nodes may reuse the values of their key properties.
"""
if self.db.journaltag is None:
- raise DatabaseError, _('Database open read-only')
+ raise DatabaseError(_('Database open read-only'))
self.fireAuditors('retire', nodeid, None)
Make node available for all operations like it was before retirement.
"""
if self.db.journaltag is None:
- raise DatabaseError, _('Database open read-only')
+ raise DatabaseError(_('Database open read-only'))
node = self.db.getnode(self.classname, nodeid)
# check if key property was overrided
except KeyError:
pass
else:
- raise KeyError, "Key property (%s) of retired node clashes with \
- existing one (%s)" % (key, node[key])
+ raise KeyError("Key property (%s) of retired node clashes "
+ "with existing one (%s)" % (key, node[key]))
self.fireAuditors('restore', nodeid, None)
# use the arg for __retired__ to cope with any odd database type
if there are any references to the node.
"""
if self.db.journaltag is None:
- raise DatabaseError, _('Database open read-only')
+ raise DatabaseError(_('Database open read-only'))
self.db.destroynode(self.classname, nodeid)
def history(self, nodeid):
'tag' is the journaltag specified when the database was opened.
"""
if not self.do_journal:
- raise ValueError, 'Journalling is disabled for this class'
+ raise ValueError('Journalling is disabled for this class')
return self.db.getjournal(self.classname, nodeid)
# Locating nodes:
"""
prop = self.getprops()[propname]
if not isinstance(prop, String):
- raise TypeError, 'key properties must be String'
+ raise TypeError('key properties must be String')
self.key = propname
def getkey(self):
otherwise a KeyError is raised.
"""
if not self.key:
- raise TypeError, 'No key property set for class %s'%self.classname
+ raise TypeError('No key property set for class %s'%self.classname)
# use the arg to handle any odd database type conversion (hello,
# sqlite)
# see if there was a result that's not retired
row = self.db.sql_fetchone()
if not row:
- raise KeyError, 'No key (%s) value "%s" for "%s"'%(self.key,
- keyvalue, self.classname)
+ raise KeyError('No key (%s) value "%s" for "%s"'%(self.key,
+ keyvalue, self.classname))
# return the id
# XXX numeric ids
# validate the args
props = self.getprops()
- propspec = propspec.items()
- for propname, nodeids in propspec:
+ for propname, nodeids in propspec.iteritems():
# check the prop is OK
prop = props[propname]
if not isinstance(prop, Link) and not isinstance(prop, Multilink):
- raise TypeError, "'%s' not a Link/Multilink property"%propname
+ raise TypeError("'%s' not a Link/Multilink property"%propname)
# first, links
a = self.db.arg
allvalues = ()
sql = []
where = []
- for prop, values in propspec:
+ for prop, values in propspec.iteritems():
if not isinstance(props[prop], hyperdb.Link):
continue
if type(values) is type({}) and len(values) == 1:
- values = values.keys()[0]
+ values = list(values)[0]
if type(values) is type(''):
allvalues += (values,)
where.append('_%s = %s'%(prop, a))
elif values is None:
where.append('_%s is NULL'%prop)
else:
- values = values.keys()
+ values = list(values)
s = ''
if None in values:
values.remove(None)
and %s"""%(self.classname, a, ' and '.join(where)))
# now multilinks
- for prop, values in propspec:
+ for prop, values in propspec.iteritems():
if not isinstance(props[prop], hyperdb.Multilink):
continue
if not values:
allvalues += (values,)
s = a
else:
- allvalues += tuple(values.keys())
+ allvalues += tuple(values)
s = ','.join([a]*len(values))
tn = '%s_%s'%(self.classname, prop)
sql.append("""select id from _%s, %s where __retired__=%s
"""
where = []
args = []
- for propname in requirements.keys():
+ for propname in requirements:
prop = self.properties[propname]
if not isinstance(prop, String):
- raise TypeError, "'%s' not a String property"%propname
+ raise TypeError("'%s' not a String property"%propname)
where.append(propname)
args.append(requirements[propname].lower())
# The format parameter is replaced with the attribute.
order_by_null_values = None
- def filter(self, search_matches, filterspec, sort=[], group=[]):
- """Return a list of the ids of the active nodes in this class that
- match the 'filter' spec, sorted by the group spec and then the
- sort spec
-
- "filterspec" is {propname: value(s)}
-
- "sort" and "group" are [(dir, prop), ...] where dir is '+', '-'
- or None and prop is a prop name or None. Note that for
- backward-compatibility reasons a single (dir, prop) tuple is
- also allowed.
+ def supports_subselects(self):
+ '''Assuming DBs can do subselects; override if they cannot.
+ '''
+ return True
+
+ def _filter_multilink_expression_fallback(
+ self, classname, multilink_table, expr):
+ '''This is a fallback for databases that do not support
+ subselects.'''
+
+ is_valid = expr.evaluate
+
+ last_id, kws = None, []
+
+ ids = IdListOptimizer()
+ append = ids.append
+
+ # This join and the evaluation in program space
+ # can be expensive for larger databases!
+ # TODO: Find a faster way to collect the data needed
+ # to evaluate the expression.
+ # Moving the expression evaluation into the database
+ # would be nice but this is tricky: think about the cases
+ # where the multilink table does not have join values
+ # needed in evaluation.
+
+ stmnt = "SELECT c.id, m.linkid FROM _%s c " \
+ "LEFT OUTER JOIN %s m " \
+ "ON c.id = m.nodeid ORDER BY c.id" % (
+ classname, multilink_table)
+ self.db.sql(stmnt)
+
+ # collect all multilink items for a class item
+ for nid, kw in self.db.sql_fetchiter():
+ if nid != last_id:
+ if last_id is None:
+ last_id = nid
+ else:
+ # we have all multilink items -> evaluate!
+ if is_valid(kws): append(last_id)
+ last_id, kws = nid, []
+ if kw is not None:
+ kws.append(kw)
- "search_matches" is a container type or None
+ if last_id is not None and is_valid(kws):
+ append(last_id)
- The filter must match all properties specificed. If the property
- value to match is a list:
+ # we have ids of the classname table
+ return ids.where("_%s.id" % classname, self.db.arg)
- 1. String properties must match all elements in the list, and
- 2. Other properties must match any of the elements in the list.
+ def _filter_multilink_expression(self, classname, multilink_table, v):
+ """ Filters out elements of the classname table that do not
+ match the given expression.
+ Returns tuple of 'WHERE' introns for the overall filter.
+ """
+ try:
+ opcodes = [int(x) for x in v]
+ if min(opcodes) >= -1: raise ValueError()
+
+ expr = compile_expression(opcodes)
+
+ if not self.supports_subselects():
+ # We heavily rely on subselects. If there is
+ # no decent support, fall back to the slower variant.
+ return self._filter_multilink_expression_fallback(
+ classname, multilink_table, expr)
+
+ atom = \
+ "%s IN(SELECT linkid FROM %s WHERE nodeid=a.id)" % (
+ self.db.arg,
+ multilink_table)
+
+ intron = \
+ "_%(classname)s.id in (SELECT id " \
+ "FROM _%(classname)s AS a WHERE %(condition)s) " % {
+ 'classname' : classname,
+ 'condition' : expr.generate(lambda n: atom) }
+
+ values = []
+ def collect_values(n): values.append(n.x)
+ expr.visit(collect_values)
+
+ return intron, values
+ except:
+ # original behavior
+ where = "%s.linkid in (%s)" % (
+ multilink_table, ','.join([self.db.arg] * len(v)))
+ return where, v, True # True to indicate original
+
+ def _filter_sql (self, search_matches, filterspec, srt=[], grp=[], retr=0):
+ """ Compute the proptree and the SQL/ARGS for a filter.
+ For argument description see filter below.
+ We return a 3-tuple, the proptree, the sql and the sql-args
+ or None if no SQL is necessary.
+ The flag retr serves to retrieve *all* non-Multilink properties
+ (for filling the cache during a filter_iter)
"""
# we can't match anything if search_matches is empty
if not search_matches and search_matches is not None:
- return []
-
- if __debug__:
- start_t = time.time()
+ return None
icn = self.classname
# figure the WHERE clause from the filterspec
mlfilt = 0 # are we joining with Multilink tables?
- sortattr = self._sortattr (group = group, sort = sort)
- proptree = self._proptree(filterspec, sortattr)
+ sortattr = self._sortattr (group = grp, sort = srt)
+ proptree = self._proptree(filterspec, sortattr, retr)
mlseen = 0
for pt in reversed(proptree.sortattr):
p = pt
pt.attr_sort_done = pt.tree_sort_done = True
proptree.compute_sort_done()
- ordercols = []
- auxcols = {}
+ cols = ['_%s.id'%icn]
mlsort = []
rhsnum = 0
for p in proptree:
- oc = None
+ rc = ac = oc = None
cn = p.classname
ln = p.uniqname
pln = p.parent.uniqname
k = p.name
v = p.val
propclass = p.propclass
- if p.sort_type > 0:
- oc = ac = '_%s._%s'%(pln, k)
+ if p.parent == proptree and p.name == 'id' \
+ and 'retrieve' in p.need_for:
+ p.sql_idx = 0
+ if 'sort' in p.need_for or 'retrieve' in p.need_for:
+ rc = oc = ac = '_%s._%s'%(pln, k)
if isinstance(propclass, Multilink):
- if p.sort_type < 2:
+ if 'search' in p.need_for:
mlfilt = 1
tn = '%s_%s'%(pcn, k)
if v in ('-1', ['-1'], []):
where.append(self._subselect(pcn, tn))
else:
frum.append(tn)
- where.append('_%s.id=%s.nodeid'%(pln,tn))
+ gen_join = True
+
+ if p.has_values and isinstance(v, type([])):
+ result = self._filter_multilink_expression(pln, tn, v)
+ # XXX: We don't need an id join if we used the filter
+ gen_join = len(result) == 3
+
+ if gen_join:
+ where.append('_%s.id=%s.nodeid'%(pln,tn))
+
if p.children:
frum.append('_%s as _%s' % (cn, ln))
where.append('%s.linkid=_%s.id'%(tn, ln))
+
if p.has_values:
if isinstance(v, type([])):
- s = ','.join([a for x in v])
- where.append('%s.linkid in (%s)'%(tn, s))
- args = args + v
+ where.append(result[0])
+ args += result[1]
else:
where.append('%s.linkid=%s'%(tn, a))
args.append(v)
- if p.sort_type > 0:
+ if 'sort' in p.need_for:
assert not p.attr_sort_done and not p.sort_ids_needed
elif k == 'id':
- if p.sort_type < 2:
+ if 'search' in p.need_for:
if isinstance(v, type([])):
+ # If there are no permitted values, then the
+ # where clause will always be false, and we
+ # can optimize the query away.
+ if not v:
+ return []
s = ','.join([a for x in v])
where.append('_%s.%s in (%s)'%(pln, k, s))
args = args + v
else:
where.append('_%s.%s=%s'%(pln, k, a))
args.append(v)
- if p.sort_type > 0:
- oc = ac = '_%s.id'%pln
+ if 'sort' in p.need_for or 'retrieve' in p.need_for:
+ rc = oc = ac = '_%s.id'%pln
elif isinstance(propclass, String):
- if p.sort_type < 2:
+ if 'search' in p.need_for:
if not isinstance(v, type([])):
v = [v]
+' and '.join(["_%s._%s LIKE '%s'"%(pln, k, s) for s in v])
+')')
# note: args are embedded in the query string now
- if p.sort_type > 0:
+ if 'sort' in p.need_for:
oc = ac = 'lower(_%s._%s)'%(pln, k)
elif isinstance(propclass, Link):
- if p.sort_type < 2:
+ if 'search' in p.need_for:
if p.children:
- if p.sort_type == 0:
+ if 'sort' not in p.need_for:
frum.append('_%s as _%s' % (cn, ln))
where.append('_%s._%s=_%s.id'%(pln, k, ln))
if p.has_values:
entry = None
d[entry] = entry
l = []
- if d.has_key(None) or not d:
- if d.has_key(None): del d[None]
+ if None in d or not d:
+ if None in d: del d[None]
l.append('_%s._%s is NULL'%(pln, k))
if d:
- v = d.keys()
+ v = list(d)
s = ','.join([a for x in v])
l.append('(_%s._%s in (%s))'%(pln, k, s))
args = args + v
else:
where.append('_%s._%s=%s'%(pln, k, a))
args.append(v)
- if p.sort_type > 0:
+ if 'sort' in p.need_for:
lp = p.cls.labelprop()
oc = ac = '_%s._%s'%(pln, k)
if lp != 'id':
- if p.tree_sort_done and p.sort_type > 0:
+ if p.tree_sort_done:
loj.append(
'LEFT OUTER JOIN _%s as _%s on _%s._%s=_%s.id'%(
cn, ln, pln, k, ln))
oc = '_%s._%s'%(ln, lp)
- elif isinstance(propclass, Date) and p.sort_type < 2:
+ if 'retrieve' in p.need_for:
+ rc = '_%s._%s'%(pln, k)
+ elif isinstance(propclass, Date) and 'search' in p.need_for:
dc = self.db.to_sql_value(hyperdb.Date)
if isinstance(v, type([])):
s = ','.join([a for x in v])
pass
elif isinstance(propclass, Interval):
# filter/sort using the __<prop>_int__ column
- if p.sort_type < 2:
+ if 'search' in p.need_for:
if isinstance(v, type([])):
s = ','.join([a for x in v])
where.append('_%s.__%s_int__ in (%s)'%(pln, k, s))
except ValueError:
# If range creation fails - ignore search parameter
pass
- if p.sort_type > 0:
+ if 'sort' in p.need_for:
oc = ac = '_%s.__%s_int__'%(pln,k)
- elif p.sort_type < 2:
+ if 'retrieve' in p.need_for:
+ rc = '_%s._%s'%(pln,k)
+ elif isinstance(propclass, Boolean) and 'search' in p.need_for:
+ if type(v) == type(""):
+ v = v.split(',')
+ if type(v) != type([]):
+ v = [v]
+ bv = []
+ for val in v:
+ if type(val) is type(''):
+ bv.append(propclass.from_raw (val))
+ else:
+ bv.append(bool(val))
+ if len(bv) == 1:
+ where.append('_%s._%s=%s'%(pln, k, a))
+ args = args + bv
+ else:
+ s = ','.join([a for x in v])
+ where.append('_%s._%s in (%s)'%(pln, k, s))
+ args = args + bv
+ elif 'search' in p.need_for:
if isinstance(v, type([])):
s = ','.join([a for x in v])
where.append('_%s._%s in (%s)'%(pln, k, s))
args.append(v)
if oc:
if p.sort_ids_needed:
- auxcols[ac] = p
+ if rc == ac:
+ p.sql_idx = len(cols)
+ p.auxcol = len(cols)
+ cols.append(ac)
if p.tree_sort_done and p.sort_direction:
- # Don't select top-level id twice
- if p.name != 'id' or p.parent != proptree:
- ordercols.append(oc)
+ # Don't select top-level id or multilink twice
+ if (not p.sort_ids_needed or ac != oc) and (p.name != 'id'
+ or p.parent != proptree):
+ if rc == oc:
+ p.sql_idx = len(cols)
+ cols.append(oc)
desc = ['', ' desc'][p.sort_direction == '-']
# Some SQL dbs sort NULL values last -- we want them first.
if (self.order_by_null_values and p.name != 'id'):
nv = self.order_by_null_values % oc
- ordercols.append(nv)
+ cols.append(nv)
p.orderby.append(nv + desc)
p.orderby.append(oc + desc)
+ if 'retrieve' in p.need_for and p.sql_idx is None:
+ assert(rc)
+ p.sql_idx = len(cols)
+ cols.append (rc)
props = self.getprops()
if search_matches is not None:
s = ','.join([a for x in search_matches])
where.append('_%s.id in (%s)'%(icn, s))
- args = args + v
+ args = args + [x for x in search_matches]
# construct the SQL
frum.append('_'+icn)
if mlfilt:
# we're joining tables on the id, so we will get dupes if we
# don't distinct()
- cols = ['distinct(_%s.id)'%icn]
- else:
- cols = ['_%s.id'%icn]
- if ordercols:
- cols = cols + ordercols
+ cols[0] = 'distinct(_%s.id)'%icn
+
order = []
# keep correct sequence of order attributes.
for sa in proptree.sortattr:
order = ' order by %s'%(','.join(order))
else:
order = ''
- for o, p in auxcols.iteritems ():
- cols.append (o)
- p.auxcol = len (cols) - 1
cols = ','.join(cols)
loj = ' '.join(loj)
sql = 'select %s from %s %s %s%s'%(cols, frum, loj, where, order)
args = tuple(args)
__traceback_info__ = (sql, args)
+ return proptree, sql, args
+
+ def filter(self, search_matches, filterspec, sort=[], group=[]):
+ """Return a list of the ids of the active nodes in this class that
+ match the 'filter' spec, sorted by the group spec and then the
+ sort spec
+
+ "filterspec" is {propname: value(s)}
+
+ "sort" and "group" are [(dir, prop), ...] where dir is '+', '-'
+ or None and prop is a prop name or None. Note that for
+ backward-compatibility reasons a single (dir, prop) tuple is
+ also allowed.
+
+ "search_matches" is a container type or None
+
+ The filter must match all properties specified. If the property
+ value to match is a list:
+
+ 1. String properties must match all elements in the list, and
+ 2. Other properties must match any of the elements in the list.
+ """
+ if __debug__:
+ start_t = time.time()
+
+ sq = self._filter_sql (search_matches, filterspec, sort, group)
+ # nothing to match?
+ if sq is None:
+ return []
+ proptree, sql, args = sq
+
self.db.sql(sql, args)
l = self.db.sql_fetchall()
# Compute values needed for sorting in proptree.sort
- for p in auxcols.itervalues():
- p.sort_ids = p.sort_result = [row[p.auxcol] for row in l]
+ for p in proptree:
+ if hasattr(p, 'auxcol'):
+ p.sort_ids = p.sort_result = [row[p.auxcol] for row in l]
# return the IDs (the first column)
# XXX numeric ids
l = [str(row[0]) for row in l]
self.db.stats['filtering'] += (time.time() - start_t)
return l
+ def filter_iter(self, search_matches, filterspec, sort=[], group=[]):
+ """Iterator similar to filter above with same args.
+ Limitation: We don't sort on multilinks.
+ This uses an optimisation: We put all nodes that are in the
+ current row into the node cache. Then we return the node id.
+ That way a fetch of a node won't create another sql-fetch (with
+ a join) from the database because the nodes are already in the
+ cache. We're using our own temporary cursor.
+ """
+ sq = self._filter_sql(search_matches, filterspec, sort, group, retr=1)
+ # nothing to match?
+ if sq is None:
+ return
+ proptree, sql, args = sq
+ cursor = self.db.conn.cursor()
+ self.db.sql(sql, args, cursor)
+ classes = {}
+ for p in proptree:
+ if 'retrieve' in p.need_for:
+ cn = p.parent.classname
+ ptid = p.parent.id # not the nodeid!
+ key = (cn, ptid)
+ if key not in classes:
+ classes[key] = {}
+ name = p.name
+ assert (name)
+ classes[key][name] = p
+ p.to_hyperdb = self.db.to_hyperdb_value(p.propclass.__class__)
+ while True:
+ row = cursor.fetchone()
+ if not row: break
+ # populate cache with current items
+ for (classname, ptid), pt in classes.iteritems():
+ nodeid = str(row[pt['id'].sql_idx])
+ key = (classname, nodeid)
+ if key in self.db.cache:
+ self.db._cache_refresh(key)
+ continue
+ node = {}
+ for propname, p in pt.iteritems():
+ value = row[p.sql_idx]
+ if value is not None:
+ value = p.to_hyperdb(value)
+ node[propname] = value
+ self.db._cache_save(key, node)
+ yield str(row[0])
+
def filter_sql(self, sql):
"""Return a list of the ids of the items in this class that match
the SQL provided. The SQL is a complete "select" statement.
may collide with the names of existing properties, or a ValueError
is raised before any properties have been added.
"""
- for key in properties.keys():
- if self.properties.has_key(key):
- raise ValueError, key
+ for key in properties:
+ if key in self.properties:
+ raise ValueError(key)
self.properties.update(properties)
def index(self, nodeid):
"""Add (or refresh) the node to search indexes
"""
# find all the String properties that have indexme
- for prop, propclass in self.getprops().items():
+ for prop, propclass in self.getprops().iteritems():
if isinstance(propclass, String) and propclass.indexme:
self.db.indexer.add_text((self.classname, nodeid, prop),
str(self.get(nodeid, prop)))
Return the nodeid of the node imported.
"""
if self.db.journaltag is None:
- raise DatabaseError, _('Database open read-only')
+ raise DatabaseError(_('Database open read-only'))
properties = self.getprops()
# make the new node's property map
if isinstance(value, unicode):
value = value.encode('utf8')
if not isinstance(value, str):
- raise TypeError, \
- 'new property "%(propname)s" not a string: %(value)r' \
- % locals()
+ raise TypeError('new property "%(propname)s" not a '
+ 'string: %(value)r'%locals())
if prop.indexme:
self.db.indexer.add_text((self.classname, newid, propname),
value)
date = date.get_tuple()
if action == 'set':
export_data = {}
- for propname, value in params.items():
- if not properties.has_key(propname):
+ for propname, value in params.iteritems():
+ if propname not in properties:
# property no longer in the schema
continue
# old tracker with data stored in the create!
params = {}
l = [nodeid, date, user, action, params]
- r.append(map(repr, l))
+ r.append(list(map(repr, l)))
return r
- def import_journals(self, entries):
- """Import a class's journal.
-
- Uses setjournal() to set the journal for each item."""
- properties = self.getprops()
- d = {}
- for l in entries:
- l = map(eval, l)
- nodeid, jdate, user, action, params = l
- r = d.setdefault(nodeid, [])
- if action == 'set':
- for propname, value in params.items():
- prop = properties[propname]
- if value is None:
- pass
- elif isinstance(prop, Date):
- value = date.Date(value)
- elif isinstance(prop, Interval):
- value = date.Interval(value)
- elif isinstance(prop, Password):
- pwd = password.Password()
- pwd.unpack(value)
- value = pwd
- params[propname] = value
- elif action == 'create' and params:
- # old tracker with data stored in the create!
- params = {}
- r.append((nodeid, date.Date(jdate), user, action, params))
-
- for nodeid, l in d.items():
- self.db.setjournal(self.classname, nodeid, l)
-
class FileClass(hyperdb.FileClass, Class):
"""This class defines a large chunk of data. To support this, it has a
mandatory String property "content" which is typically saved off
"""The newly-created class automatically includes the "content"
and "type" properties.
"""
- if not properties.has_key('content'):
+ if 'content' not in properties:
properties['content'] = hyperdb.String(indexme='yes')
- if not properties.has_key('type'):
+ if 'type' not in properties:
properties['type'] = hyperdb.String()
Class.__init__(self, db, classname, **properties)
if propname == 'content':
try:
return self.db.getfile(self.classname, nodeid, None)
- except IOError, (strerror):
+ except IOError, strerror:
# BUG: by catching this we donot see an error in the log.
return 'ERROR reading file: %s%s\n%s\n%s'%(
self.classname, nodeid, poss_msg, strerror)
# now remove the content property so it's not stored in the db
content = None
- if propvalues.has_key('content'):
+ if 'content' in propvalues:
content = propvalues['content']
del propvalues['content']
Use the content-type property for the content property.
"""
# find all the String properties that have indexme
- for prop, propclass in self.getprops().items():
+ for prop, propclass in self.getprops().iteritems():
if prop == 'content' and propclass.indexme:
mime_type = self.get(nodeid, 'type', self.default_mime_type)
self.db.indexer.add_text((self.classname, nodeid, 'content'),
"creation", "creator", "activity" or "actor" property, a ValueError
is raised.
"""
- if not properties.has_key('title'):
+ if 'title' not in properties:
properties['title'] = hyperdb.String(indexme='yes')
- if not properties.has_key('messages'):
+ if 'messages' not in properties:
properties['messages'] = hyperdb.Multilink("msg")
- if not properties.has_key('files'):
+ if 'files' not in properties:
properties['files'] = hyperdb.Multilink("file")
- if not properties.has_key('nosy'):
+ if 'nosy' not in properties:
# note: journalling is turned off as it really just wastes
# space. this behaviour may be overridden in an instance
properties['nosy'] = hyperdb.Multilink("user", do_journal="no")
- if not properties.has_key('superseder'):
+ if 'superseder' not in properties:
properties['superseder'] = hyperdb.Multilink(classname)
Class.__init__(self, db, classname, **properties)