summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 6033f0e)
raw | patch | inline | side by side (parent: 6033f0e)
author | richard <richard@57a73879-2fb5-44c3-a270-3262357dd7e2> | |
Mon, 23 Sep 2002 06:48:35 +0000 (06:48 +0000) | ||
committer | richard <richard@57a73879-2fb5-44c3-a270-3262357dd7e2> | |
Mon, 23 Sep 2002 06:48:35 +0000 (06:48 +0000) |
Better date unserialisation too.
git-svn-id: http://svn.roundup-tracker.org/svnroot/roundup/trunk@1207 57a73879-2fb5-44c3-a270-3262357dd7e2
git-svn-id: http://svn.roundup-tracker.org/svnroot/roundup/trunk@1207 57a73879-2fb5-44c3-a270-3262357dd7e2
diff --git a/TODO.txt b/TODO.txt
index 0fbb7b9ed4aa8d700870045e5c1eb717eab07ad2..cbcf579d54d4b11b592d4dac5803db26fe1359ff 100644 (file)
--- a/TODO.txt
+++ b/TODO.txt
bug mailgw some f*ked mailers QUOTE their Re; "Re: "[issue1] bla blah""
bug docs need to mention somewhere how sorting works
+bug import import isn't setting the journal props correctly
======= ========= =============================================================
index 76db52e48c0095d687755ef1ff6941fa74fc7f7c..1896c625bf2722438cbbdd82b2ffc489c80d4869 100644 (file)
-# $Id: back_gadfly.py,v 1.24 2002-09-20 01:20:31 richard Exp $
+# $Id: back_gadfly.py,v 1.25 2002-09-23 06:48:34 richard Exp $
__doc__ = '''
About Gadfly
============
self.database_schema = {}
self.conn = gadfly.gadfly()
self.conn.startup(*db)
- cursor = self.conn.cursor()
- cursor.execute('create table schema (schema varchar)')
- cursor.execute('create table ids (name varchar, num integer)')
+ self.cursor = self.conn.cursor()
+ self.cursor.execute('create table schema (schema varchar)')
+ self.cursor.execute('create table ids (name varchar, num integer)')
else:
- cursor = self.conn.cursor()
- cursor.execute('select schema from schema')
- self.database_schema = cursor.fetchone()[0]
+ self.cursor = self.conn.cursor()
+ self.cursor.execute('select schema from schema')
+ self.database_schema = self.cursor.fetchone()[0]
else:
self.conn = gadfly.client.gfclient(*db)
- self.database_schema = self.load_dbschema(cursor)
+ self.database_schema = self.load_dbschema()
def __repr__(self):
return '<roundfly 0x%x>'%id(self)
- def sql_fetchone(self, cursor):
+ def sql_fetchone(self):
''' Fetch a single row. If there's nothing to fetch, return None.
'''
try:
- return cursor.fetchone()
+ return self.cursor.fetchone()
except gadfly.database.error, message:
if message == 'no more results':
return None
raise
- def save_dbschema(self, cursor, schema):
+ def save_dbschema(self, schema):
''' Save the schema definition that the database currently implements
'''
- self.sql(cursor, 'insert into schema values (?)',
- (self.database_schema,))
+ self.sql('insert into schema values (?)', (self.database_schema,))
- def load_dbschema(self, cursor):
+ def load_dbschema(self):
''' Load the schema definition that the database currently implements
'''
- cursor.execute('select schema from schema')
- return cursor.fetchone()[0]
+ self.cursor.execute('select schema from schema')
+ return self.cursor.fetchone()[0]
- def save_journal(self, cursor, classname, cols, nodeid, journaldate,
+ def save_journal(self, classname, cols, nodeid, journaldate,
journaltag, action, params):
''' Save the journal entry to the database
'''
cols)
if __debug__:
print >>hyperdb.DEBUG, 'addjournal', (self, sql, entry)
- cursor.execute(sql, entry)
+ self.cursor.execute(sql, entry)
- def load_journal(self, cursor, classname, cols, nodeid):
+ def load_journal(self, classname, cols, nodeid):
''' Load the journal from the database
'''
# now get the journal entries
self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'getjournal', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
+ self.cursor.execute(sql, (nodeid,))
res = []
- for nodeid, date_stamp, user, action, params in cursor.fetchall():
+ for nodeid, date_stamp, user, action, params in self.cursor.fetchall():
res.append((nodeid, date.Date(date_stamp), user, action, params))
return res
args = tuple(args)
if __debug__:
print >>hyperdb.DEBUG, 'filter', (self, sql, args)
- cursor = self.db.conn.cursor()
- cursor.execute(sql, args)
- l = cursor.fetchall()
+ self.db.cursor.execute(sql, args)
+ l = self.db.cursor.fetchall()
# return the IDs
return [row[0] for row in l]
index 965245f4be22e1e6d91a2d5360d9166b27ee88e4..e5f64b0a8598ad73d8f33bcb4b87399c69943f8c 100644 (file)
-# $Id: back_sqlite.py,v 1.3 2002-09-19 02:37:41 richard Exp $
+# $Id: back_sqlite.py,v 1.4 2002-09-23 06:48:35 richard Exp $
__doc__ = '''
See https://pysqlite.sourceforge.net/ for pysqlite info
'''
os.umask(0002)
db = os.path.join(self.config.DATABASE, 'db')
self.conn = sqlite.connect(db=db)
- cursor = self.conn.cursor()
+ self.cursor = self.conn.cursor()
try:
- self.database_schema = self.load_dbschema(cursor)
+ self.database_schema = self.load_dbschema()
except sqlite.DatabaseError, error:
if str(error) != 'no such table: schema':
raise
self.database_schema = {}
- cursor = self.conn.cursor()
- cursor.execute('create table schema (schema varchar)')
- cursor.execute('create table ids (name varchar, num integer)')
+ self.cursor.execute('create table schema (schema varchar)')
+ self.cursor.execute('create table ids (name varchar, num integer)')
def __repr__(self):
return '<roundlite 0x%x>'%id(self)
- def sql_fetchone(self, cursor):
+ def sql_fetchone(self):
''' Fetch a single row. If there's nothing to fetch, return None.
'''
- return cursor.fetchone()
+ return self.cursor.fetchone()
def sql_commit(self):
''' Actually commit to the database.
if str(error) != 'cannot commit - no transaction is active':
raise
- def save_dbschema(self, cursor, schema):
+ def save_dbschema(self, schema):
''' Save the schema definition that the database currently implements
'''
s = repr(self.database_schema)
- self.sql(cursor, 'insert into schema values (%s)', (s,))
+ self.sql('insert into schema values (%s)', (s,))
- def load_dbschema(self, cursor):
+ def load_dbschema(self):
''' Load the schema definition that the database currently implements
'''
- cursor.execute('select schema from schema')
- return eval(cursor.fetchone()[0])
+ self.cursor.execute('select schema from schema')
+ return eval(self.cursor.fetchone()[0])
- def save_journal(self, cursor, classname, cols, nodeid, journaldate,
+ def save_journal(self, classname, cols, nodeid, journaldate,
journaltag, action, params):
''' Save the journal entry to the database
'''
cols, a, a, a, a, a)
if __debug__:
print >>hyperdb.DEBUG, 'addjournal', (self, sql, entry)
- cursor.execute(sql, entry)
+ self.cursor.execute(sql, entry)
- def load_journal(self, cursor, classname, cols, nodeid):
+ def load_journal(self, classname, cols, nodeid):
''' Load the journal from the database
'''
# now get the journal entries
self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'getjournal', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
+ self.cursor.execute(sql, (nodeid,))
res = []
- for nodeid, date_stamp, user, action, params in cursor.fetchall():
+ for nodeid, date_stamp, user, action, params in self.cursor.fetchall():
params = eval(params)
res.append((nodeid, date.Date(date_stamp), user, action, params))
return res
index e53b965cb90dda4a7fe4790877f5519029b6cf0c..29f0ca7ac01f835eb35adb44e12fd8ed13d0ebe5 100644 (file)
-# $Id: rdbms_common.py,v 1.10 2002-09-23 00:50:32 richard Exp $
+# $Id: rdbms_common.py,v 1.11 2002-09-23 06:48:35 richard Exp $
# standard python modules
import sys, os, time, re, errno, weakref, copy
'''
raise NotImplemented
- def sql(self, cursor, sql, args=None):
+ def sql(self, sql, args=None):
''' Execute the sql with the optional args.
'''
if __debug__:
print >>hyperdb.DEBUG, (self, sql, args)
if args:
- cursor.execute(sql, args)
+ self.cursor.execute(sql, args)
else:
- cursor.execute(sql)
+ self.cursor.execute(sql)
- def sql_fetchone(self, cursor):
+ def sql_fetchone(self):
''' Fetch a single row. If there's nothing to fetch, return None.
'''
raise NotImplemented
'''
return re.sub("'", "''", str(value))
- def save_dbschema(self, cursor, schema):
+ def save_dbschema(self, schema):
''' Save the schema definition that the database currently implements
'''
raise NotImplemented
- def load_dbschema(self, cursor):
+ def load_dbschema(self):
''' Load the schema definition that the database currently implements
'''
raise NotImplemented
# update the database version of the schema
if save:
- cursor = self.conn.cursor()
- self.sql(cursor, 'delete from schema')
- self.save_dbschema(cursor, self.database_schema)
+ self.sql('delete from schema')
+ self.save_dbschema(self.database_schema)
# reindex the db if necessary
if self.indexer.should_reindex():
dbspec_propnames.append(propname)
dbspec_props[propname] = prop
- # we're going to need one of these
- cursor = self.conn.cursor()
-
# now compare
for propname in spec_propnames:
prop = spec_props[propname]
# add the property
if isinstance(prop, Multilink):
# all we have to do here is create a new table, easy!
- self.create_multilink_table(cursor, spec, propname)
+ self.create_multilink_table(spec, propname)
continue
# no ALTER TABLE, so we:
sql = 'select %s,%s from _%s'%(','.join(oldcols), self.arg, cn)
if __debug__:
print >>hyperdb.DEBUG, 'update_class', (self, sql, None)
- cursor.execute(sql, (None,))
- olddata = cursor.fetchall()
+ self.cursor.execute(sql, (None,))
+ olddata = self.cursor.fetchall()
# 2. drop the old table
- cursor.execute('drop table _%s'%cn)
+ self.cursor.execute('drop table _%s'%cn)
# 3. create the new table
- cols, mls = self.create_class_table(cursor, spec)
+ cols, mls = self.create_class_table(spec)
# ensure the new column is last
cols.remove('_'+propname)
assert oldcols == cols, "Column lists don't match!"
# do the insert
for row in olddata:
- self.sql(cursor, sql, tuple(row))
+ self.sql(sql, tuple(row))
else:
# modify the property
sql = 'drop table %s_%s'%(spec.classname, prop)
if __debug__:
print >>hyperdb.DEBUG, 'update_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
else:
# no ALTER TABLE, so we:
# 1. pull out the data, excluding the removed column
oldcols.remove('_'+propname)
cn = spec.classname
sql = 'select %s from _%s'%(','.join(oldcols), cn)
- cursor.execute(sql, (None,))
+ self.cursor.execute(sql, (None,))
olddata = sql.fetchall()
# 2. drop the old table
- cursor.execute('drop table _%s'%cn)
+ self.cursor.execute('drop table _%s'%cn)
# 3. create the new table
- cols, mls = self.create_class_table(self, cursor, spec)
+ cols, mls = self.create_class_table(self, spec)
assert oldcols != cols, "Column lists don't match!"
# 4. populate with the data from step one
qs = ','.join([self.arg for x in cols])
sql = 'insert into _%s values (%s)'%(cn, s)
- cursor.execute(sql, olddata)
+ self.cursor.execute(sql, olddata)
return 1
- def create_class_table(self, cursor, spec):
+ def create_class_table(self, spec):
''' create the class table for the given spec
'''
cols, mls = self.determine_columns(spec.properties.items())
sql = 'create table _%s (%s)'%(spec.classname, scols)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
return cols, mls
- def create_journal_table(self, cursor, spec):
+ def create_journal_table(self, spec):
''' create the journal table for a class given the spec and
already-determined cols
'''
sql = 'create table %s__journal (%s)'%(spec.classname, cols)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
- def create_multilink_table(self, cursor, spec, ml):
+ def create_multilink_table(self, spec, ml):
''' Create a multilink table for the "ml" property of the class
given by the spec
'''
spec.classname, ml)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
def create_class(self, spec):
''' Create a database table according to the given spec.
'''
- cursor = self.conn.cursor()
- cols, mls = self.create_class_table(cursor, spec)
- self.create_journal_table(cursor, spec)
+ cols, mls = self.create_class_table(spec)
+ self.create_journal_table(spec)
# now create the multilink tables
for ml in mls:
- self.create_multilink_table(cursor, spec, ml)
+ self.create_multilink_table(spec, ml)
# ID counter
sql = 'insert into ids (name, num) values (%s,%s)'%(self.arg, self.arg)
vals = (spec.classname, 1)
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
def drop_class(self, spec):
''' Drop the given table from the database.
for col, prop in spec.properties.items():
if isinstance(prop, Multilink):
mls.append(col)
- cursor = self.conn.cursor()
sql = 'drop table _%s'%spec.classname
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
sql = 'drop table %s__journal'%spec.classname
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
for ml in mls:
sql = 'drop table %s_%s'%(spec.classname, ml)
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
#
# Classes
'''
if __debug__:
print >>hyperdb.DEBUG, 'clear', (self,)
- cursor = self.conn.cursor()
for cn in self.classes.keys():
sql = 'delete from _%s'%cn
if __debug__:
print >>hyperdb.DEBUG, 'clear', (self, sql)
- cursor.execute(sql)
+ self.cursor.execute(sql)
#
# Node IDs
''' Generate a new id for the given class
'''
# get the next ID
- cursor = self.conn.cursor()
sql = 'select num from ids where name=%s'%self.arg
if __debug__:
print >>hyperdb.DEBUG, 'newid', (self, sql, classname)
- cursor.execute(sql, (classname, ))
- newid = cursor.fetchone()[0]
+ self.cursor.execute(sql, (classname, ))
+ newid = self.cursor.fetchone()[0]
# update the counter
sql = 'update ids set num=%s where name=%s'%(self.arg, self.arg)
vals = (int(newid)+1, classname)
if __debug__:
print >>hyperdb.DEBUG, 'newid', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
# return as string
return str(newid)
def setid(self, classname, setid):
''' Set the id counter: used during import of database
'''
- cursor = self.conn.cursor()
sql = 'update ids set num=%s where name=%s'%(self.arg, self.arg)
vals = (setid, classname)
if __debug__:
print >>hyperdb.DEBUG, 'setid', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
#
# Nodes
cols = ','.join(cols) + ',id,__retired__'
# perform the inserts
- cursor = self.conn.cursor()
sql = 'insert into _%s (%s) values (%s)'%(classname, cols, s)
if __debug__:
print >>hyperdb.DEBUG, 'addnode', (self, sql, vals)
- cursor.execute(sql, vals)
+ self.cursor.execute(sql, vals)
# insert the multilink rows
for col in mls:
for entry in node[col]:
sql = 'insert into %s (linkid, nodeid) values (%s,%s)'%(t,
self.arg, self.arg)
- self.sql(cursor, sql, (entry, nodeid))
+ self.sql(sql, (entry, nodeid))
# make sure we do the commit-time extra stuff for this node
self.transactions.append((self.doSaveNode, (classname, nodeid, node)))
cols.append('_'+col)
cols.sort()
- cursor = self.conn.cursor()
-
# if there's any updates to regular columns, do them
if cols:
# make sure the ordering is correct for column name -> column value
sql = 'update _%s set %s where id=%s'%(classname, s, self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'setnode', (self, sql, sqlvals)
- cursor.execute(sql, sqlvals)
+ self.cursor.execute(sql, sqlvals)
# now the fun bit, updating the multilinks ;)
for col, (add, remove) in multilink_changes.items():
sql = 'insert into %s (nodeid, linkid) values (%s,%s)'%(tn,
self.arg, self.arg)
for addid in add:
- self.sql(cursor, sql, (nodeid, addid))
+ self.sql(sql, (nodeid, addid))
if remove:
sql = 'delete from %s where nodeid=%s and linkid=%s'%(tn,
self.arg, self.arg)
for removeid in remove:
- self.sql(cursor, sql, (nodeid, removeid))
+ self.sql(sql, (nodeid, removeid))
# make sure we do the commit-time extra stuff for this node
self.transactions.append((self.doSaveNode, (classname, nodeid, values)))
scols = ','.join(cols)
# perform the basic property fetch
- cursor = self.conn.cursor()
sql = 'select %s from _%s where id=%s'%(scols, classname, self.arg)
- self.sql(cursor, sql, (nodeid,))
+ self.sql(sql, (nodeid,))
- values = self.sql_fetchone(cursor)
+ values = self.sql_fetchone()
if values is None:
raise IndexError, 'no such %s node %s'%(classname, nodeid)
# get the link ids
sql = 'select linkid from %s_%s where nodeid=%s'%(classname, col,
self.arg)
- cursor.execute(sql, (nodeid,))
+ self.cursor.execute(sql, (nodeid,))
# extract the first column from the result
- node[col] = [x[0] for x in cursor.fetchall()]
+ node[col] = [x[0] for x in self.cursor.fetchall()]
# un-dbificate the node data
node = self.unserialise(classname, node)
self.transactions.remove(entry)
# now do the SQL
- cursor = self.conn.cursor()
sql = 'delete from _%s where id=%s'%(classname, self.arg)
- self.sql(cursor, sql, (nodeid,))
+ self.sql(sql, (nodeid,))
# remove from multilnks
cl = self.getclass(classname)
for col in mls:
# get the link ids
sql = 'delete from %s_%s where nodeid=%s'%(classname, col, self.arg)
- cursor.execute(sql, (nodeid,))
+ self.cursor.execute(sql, (nodeid,))
# remove journal entries
sql = 'delete from %s__journal where nodeid=%s'%(classname, self.arg)
- self.sql(cursor, sql, (nodeid,))
+ self.sql(sql, (nodeid,))
def serialise(self, classname, node):
'''Copy the node contents, converting non-marshallable data into
def hasnode(self, classname, nodeid):
''' Determine if the database has a given node.
'''
- cursor = self.conn.cursor()
sql = 'select count(*) from _%s where id=%s'%(classname, self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'hasnode', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
- return int(cursor.fetchone()[0])
+ self.cursor.execute(sql, (nodeid,))
+ return int(self.cursor.fetchone()[0])
def countnodes(self, classname):
''' Count the number of nodes that exist for a particular Class.
'''
- cursor = self.conn.cursor()
sql = 'select count(*) from _%s'%classname
if __debug__:
print >>hyperdb.DEBUG, 'countnodes', (self, sql)
- cursor.execute(sql)
- return cursor.fetchone()[0]
+ self.cursor.execute(sql)
+ return self.cursor.fetchone()[0]
def getnodeids(self, classname, retired=0):
''' Retrieve all the ids of the nodes for a particular Class.
Set retired=None to get all nodes. Otherwise it'll get all the
retired or non-retired nodes, depending on the flag.
'''
- cursor = self.conn.cursor()
# flip the sense of the flag if we don't want all of them
if retired is not None:
retired = not retired
sql = 'select id from _%s where __retired__ <> %s'%(classname, self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'getnodeids', (self, sql, retired)
- cursor.execute(sql, (retired,))
- return [x[0] for x in cursor.fetchall()]
+ self.cursor.execute(sql, (retired,))
+ return [x[0] for x in self.cursor.fetchall()]
def addjournal(self, classname, nodeid, action, params, creator=None,
creation=None):
print >>hyperdb.DEBUG, 'addjournal', (nodeid, journaldate,
journaltag, action, params)
- cursor = self.conn.cursor()
- self.save_journal(cursor, classname, cols, nodeid, journaldate,
+ self.save_journal(classname, cols, nodeid, journaldate,
journaltag, action, params)
- def save_journal(self, cursor, classname, cols, nodeid, journaldate,
+ def save_journal(self, classname, cols, nodeid, journaldate,
journaltag, action, params):
''' Save the journal entry to the database
'''
if not self.hasnode(classname, nodeid):
raise IndexError, '%s has no node %s'%(classname, nodeid)
- cursor = self.conn.cursor()
cols = ','.join('nodeid date tag action params'.split())
- return self.load_journal(cursor, classname, cols, nodeid)
+ return self.load_journal(classname, cols, nodeid)
- def load_journal(self, cursor, classname, cols, nodeid):
+ def load_journal(self, classname, cols, nodeid):
''' Load the journal from the database
'''
raise NotImplemented
date_stamp = pack_before.serialise()
# do the delete
- cursor = self.conn.cursor()
for classname in self.classes.keys():
sql = "delete from %s__journal where date<%s and "\
"action<>'create'"%(classname, self.arg)
if __debug__:
print >>hyperdb.DEBUG, 'pack', (self, sql, date_stamp)
- cursor.execute(sql, (date_stamp,))
+ self.cursor.execute(sql, (date_stamp,))
def sql_commit(self):
''' Actually commit to the database.
if self.db.journaltag is None:
raise DatabaseError, 'Database open read-only'
- cursor = self.db.conn.cursor()
sql = 'update _%s set __retired__=1 where id=%s'%(self.classname,
self.db.arg)
if __debug__:
print >>hyperdb.DEBUG, 'retire', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
+ self.db.cursor.execute(sql, (nodeid,))
def is_retired(self, nodeid):
'''Return true if the node is retired
'''
- cursor = self.db.conn.cursor()
sql = 'select __retired__ from _%s where id=%s'%(self.classname,
self.db.arg)
if __debug__:
print >>hyperdb.DEBUG, 'is_retired', (self, sql, nodeid)
- cursor.execute(sql, (nodeid,))
- return int(cursor.fetchone()[0])
+ self.db.cursor.execute(sql, (nodeid,))
+ return int(self.db.sql_fetchone()[0])
def destroy(self, nodeid):
'''Destroy a node.
if not self.key:
raise TypeError, 'No key property set for class %s'%self.classname
- cursor = self.db.conn.cursor()
sql = 'select id,__retired__ from _%s where _%s=%s'%(self.classname,
self.key, self.db.arg)
- self.db.sql(cursor, sql, (keyvalue,))
+ self.db.sql(sql, (keyvalue,))
# see if there was a result that's not retired
- l = cursor.fetchall()
+ l = self.db.cursor.fetchall()
if not l or int(l[0][1]):
raise KeyError, 'No key (%s) value "%s" for "%s"'%(self.key,
keyvalue, self.classname)
tables.append('select nodeid from %s_%s where linkid in (%s)'%(
self.classname, prop, ','.join([a for x in values.keys()])))
sql = '\nintersect\n'.join(tables)
- if __debug__:
- print >>hyperdb.DEBUG, 'find', (self, sql, allvalues)
- cursor = self.db.conn.cursor()
- cursor.execute(sql, allvalues)
+ self.db.sql(sql, allvalues)
try:
- l = [x[0] for x in cursor.fetchall()]
+ l = [x[0] for x in self.db.cursor.fetchall()]
except gadfly.database.error, message:
if message == 'no more results':
l = []
args = tuple(args)
if __debug__:
print >>hyperdb.DEBUG, 'filter', (self, sql, args)
- cursor = self.db.conn.cursor()
- cursor.execute(sql, args)
- l = cursor.fetchall()
+ self.db.cursor.execute(sql, args)
+ l = self.db.cursor.fetchall()
# return the IDs (the first column)
# XXX The filter(None, l) bit is sqlite-specific... if there's _NO_
index 781afb0424f69210f6099890a51e675d09e3b08f..72a0c4a02ca09a95ae617d1f1ea0922b1894882a 100644 (file)
class NoTemplate(Exception):
pass
+def precompileTemplates(dir):
+ ''' Go through a directory and precompile all the templates therein
+ '''
+ for filename in os.listdir(dir):
+ if os.path.isdir(filename): continue
+ if '.' in filename:
+ name, extension = filename.split('.')
+ getTemplate(dir, name, extension)
+ else:
+ getTemplate(dir, filename, None)
+
def getTemplate(dir, name, extension, classname=None, request=None):
''' Interface to get a template, possibly loading a compiled template.
diff --git a/roundup/date.py b/roundup/date.py
index 1cc6464b9aa8748f64e13ce0a4c5b9a1d41d3c72..e048927d7aa018e42d14f892b0014f425b4c91b6 100644 (file)
--- a/roundup/date.py
+++ b/roundup/date.py
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-# $Id: date.py,v 1.28 2002-09-10 12:44:42 richard Exp $
+# $Id: date.py,v 1.29 2002-09-23 06:48:34 richard Exp $
__doc__ = """
Date, time and time interval handling.
(((?P<H>\d?\d):(?P<M>\d\d))?(:(?P<S>\d\d))?)? # hh:mm:ss
(?P<o>.+)? # offset
''', re.VERBOSE), serialised_re=re.compile('''
- (?P<y>\d{4})(?P<m>\d{2})(?P<d>\d{2}) # yyyymmdd
- (?P<H>\d{2})(?P<M>\d{2})(?P<S>\d{2}) # HHMMSS
- ''', re.VERBOSE)):
+ (\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})
+ ''')):
''' set the date to the value in spec
'''
m = serialised_re.match(spec)
+ if m:
+ # we're serialised - easy!
+ self.year, self.month, self.day, self.hour, self.minute, \
+ self.second = map(int, m.groups()[1:7])
+ return
+
+ # not serialised data, try usual format
+ m = date_re.match(spec)
if not m:
- m = date_re.match(spec)
- if not m:
- raise ValueError, _('Not a date spec: [[yyyy-]mm-dd].'
- '[[h]h:mm[:ss]][offset]')
+ raise ValueError, _('Not a date spec: [[yyyy-]mm-dd].'
+ '[[h]h:mm[:ss]][offset]')
info = m.groupdict()
index 8b49efcf450b0a1ca6998db868f82c39f9d9a1c3..72d7d380b0fac9c9220af2cdd59c922806d186e6 100644 (file)
#
""" HTTP Server that serves roundup.
-$Id: roundup_server.py,v 1.11 2002-09-23 00:50:32 richard Exp $
+$Id: roundup_server.py,v 1.12 2002-09-23 06:48:35 richard Exp $
"""
# python version check
## end configuration
#
-
class RoundupRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
TRACKER_HOMES = TRACKER_HOMES
ROUNDUP_USER = ROUNDUP_USER
diff --git a/test/test_db.py b/test/test_db.py
index 33bc69caee2492bd67798394328bb1cb8f4bccae..2af0a5cace6d7c03dc08519bc71a54e8cbd705ac 100644 (file)
--- a/test/test_db.py
+++ b/test/test_db.py
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-# $Id: test_db.py,v 1.53 2002-09-20 19:26:28 gmcm Exp $
+# $Id: test_db.py,v 1.54 2002-09-23 06:48:35 richard Exp $
import unittest, os, shutil, time
(x, date_stamp2, x, x, x) = entry
# see if the change was journalled when it shouldn't have been
self.assertEqual(date_stamp, date_stamp2)
+ time.sleep(1)
self.db.issue.enableJournalling()
self.db.issue.set('1', title='hello world 2')
self.db.commit()
self.db.commit()
# sleep for at least a second, then get a date to pack at
- time.sleep(1)
+ time.sleep(2)
pack_before = date.Date('.')
- time.sleep(1)
+ time.sleep(2)
# one more entry
self.db.issue.set(id, status='3')
unittest.makeSuite(anydbmDBTestCase, 'test'),
unittest.makeSuite(anydbmReadOnlyDBTestCase, 'test')
]
-# return unittest.TestSuite(l)
+ return unittest.TestSuite(l)
try:
import gadfly