summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 28c868f)
raw | patch | inline | side by side (parent: 28c868f)
author | richard <richard@57a73879-2fb5-44c3-a270-3262357dd7e2> | |
Tue, 11 Nov 2003 11:19:18 +0000 (11:19 +0000) | ||
committer | richard <richard@57a73879-2fb5-44c3-a270-3262357dd7e2> | |
Tue, 11 Nov 2003 11:19:18 +0000 (11:19 +0000) |
the backend module itself)
- also cleaned up the index maintenance code (actual checks for existence
rather than bare-except failure mode)
git-svn-id: http://svn.roundup-tracker.org/svnroot/roundup/trunk@1975 57a73879-2fb5-44c3-a270-3262357dd7e2
- also cleaned up the index maintenance code (actual checks for existence
rather than bare-except failure mode)
git-svn-id: http://svn.roundup-tracker.org/svnroot/roundup/trunk@1975 57a73879-2fb5-44c3-a270-3262357dd7e2
diff --git a/CHANGES.txt b/CHANGES.txt
index fed3b7593c5c94d1fcb6a7cdf93763611ca10ee3..2c700e8a237bbdd5a56eef7f70a537c4a519892f 100644 (file)
--- a/CHANGES.txt
+++ b/CHANGES.txt
- support setgid and running on port < 1024 (sf patch 777528)
- using Zope3's test runner now, allowing GC checks, nicer controls and
coverage analysis
-- !BETA! added postgresql backend, needs work !BETA!
+- added postgresql backend
- all RDBMS backends now have indexes on several columns
- Change nosymessage and send_message to accept msgid=None (RFE #707235).
diff --git a/doc/postgresql.txt b/doc/postgresql.txt
index a5917c13412012254a348842219acf4c8d83ddef..4b634ba2980fa78daf617e8b81cbb9c688055dab 100644 (file)
--- a/doc/postgresql.txt
+++ b/doc/postgresql.txt
http://initd.org/software/initd/psycopg
+Running the PostgreSQL unit tests
+=================================
+
+The user that you're running the tests as will need to be able to access
+the postgresql database on the local machine and create and drop
+databases. Edit the ``test/test_postgresql.py`` database connection info if
+you wish to test against a different database.
+
+The test database will be called "rounduptest".
+
+
Additional configuration
========================
(config.py in the tracker's home directory) should be appended with the
following constants (substituting real values, obviously)::
- POSTGRESQL_DBHOST = 'localhost'
- POSTGRESQL_DBUSER = 'roundup'
- POSTGRESQL_DBPASSWORD = 'roundup'
- POSTGRESQL_DBNAME = 'roundup'
- POSTGRESQL_PORT = 5432
+ POSTGRESQL_DATABASE = {'database': 'rounduptest'}
+
+If the database is not local, or a different user is to be used, then more information may
+be supplied::
+
POSTGRESQL_DATABASE = {
- 'host': MYSQL_DBHOST, 'port': POSTGRESQL_PORT,
- 'user': MYSQL_DBUSER, 'password': MYSQL_DBPASSWORD,
- 'database': MYSQL_DBNAME
+ 'host': 'localhost', 'port': 5432,
+        'database': 'roundup',
+ 'user': 'roundup', 'password': 'roundup',
}
Also note that you can leave some values out of
``POSTGRESQL_DATABASE``: 'host' and 'port' are not necessary when
connecting to a local database and 'password'
is optional if postgres trusts local connections. The user specified in
-``POSTGRESQL_DBUSER`` must have rights to create a new database and to
+``user`` must have rights to create a new database and to
connect to the "template1" database, used while initializing roundup.
Have fun with psycopg,
index 05f2de33a685c859e45ce134254fcc5e5bbe6e9c..bac7d7a60df507cf2755703a8e1076e38e83608f 100644 (file)
def sql_fetchall(self):
return self.cursor.fetchall()
-
+
+ def sql_index_exists(self, table_name, index_name):
+ self.cursor.execute('show index from %s'%table_name)
+ for index in self.cursor.fetchall():
+ if index[2] == index_name:
+ return 1
+ return 0
+
def save_dbschema(self, schema):
s = repr(self.database_schema)
self.sql('INSERT INTO schema VALUES (%s)', (s,))
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ self.create_class_table_indexes(spec)
return cols, mls
+ def drop_class_table_indexes(self, cn, key):
+ # drop the old table indexes first
+ l = ['_%s_id_idx'%cn, '_%s_retired_idx'%cn]
+ if key:
+ l.append('_%s_%s_idx'%(cn, key))
+
+ table_name = '_%s'%cn
+ for index_name in l:
+ if not self.sql_index_exists(table_name, index_name):
+ continue
+ index_sql = 'drop index %s on %s'%(index_name, table_name)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ self.cursor.execute(index_sql)
+
def create_journal_table(self, spec):
cols = ',' . join(['`%s` VARCHAR(255)'%x
for x in 'nodeid date tag action params' . split()])
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ self.create_journal_table_indexes(spec)
+
+ def drop_journal_table_indexes(self, classname):
+ index_name = '%s_journ_idx'%classname
+ if not self.sql_index_exists('%s__journal'%classname, index_name):
+ return
+ index_sql = 'drop index %s on %s__journal'%(index_name, classname)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ self.cursor.execute(index_sql)
def create_multilink_table(self, spec, ml):
sql = '''CREATE TABLE `%s_%s` (linkid VARCHAR(255),
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ self.create_multilink_table_indexes(spec, ml)
+
+ def drop_multilink_table_indexes(self, classname, ml):
+ l = [
+ '%s_%s_l_idx'%(classname, ml),
+ '%s_%s_n_idx'%(classname, ml)
+ ]
+ for index_name in l:
+ if not self.sql_index_exists(table_name, index_name):
+ continue
+ index_sql = 'drop index %s on %s'%(index_name, table_name)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ self.cursor.execute(index_sql)
class MysqlClass:
# we're overriding this method for ONE missing bit of functionality.
index b90ced538816d4a66b634533b13357d6278e092f..762f7f3438777be0590810ecdd02c0e847fbf831 100644 (file)
from roundup.backends.rdbms_common import *
from roundup.backends import rdbms_common
import psycopg
-import os, shutil
-
-class Maintenance:
- """ Database maintenance functions """
- def db_nuke(self, config):
- """Clear all database contents and drop database itself"""
- config.POSTGRESQL_DATABASE['database'] = 'template1'
- db = Database(config, 'admin')
- db.conn.set_isolation_level(0)
- db.sql("DROP DATABASE %s" % config.POSTGRESQL_DBNAME)
- db.sql("CREATE DATABASE %s" % config.POSTGRESQL_DBNAME)
- if os.path.exists(config.DATABASE):
- shutil.rmtree(config.DATABASE)
- config.POSTGRESQL_DATABASE['database'] = config.POSTGRESQL_DBNAME
-
- def db_exists(self, config):
- """Check if database already exists"""
- try:
- db = Database(config, 'admin')
- return 1
- except:
- return 0
+import os, shutil, popen2
class Database(Database):
arg = '%s'
self.conn.close()
def __repr__(self):
- return '<psycopgroundsql 0x%x>' % id(self)
+ return '<roundpsycopgsql 0x%x>' % id(self)
def sql_fetchone(self):
return self.cursor.fetchone()
return self.cursor.fetchall()
def sql_stringquote(self, value):
- return psycopg.QuotedString(str(value))
+ ''' psycopg.QuotedString returns a "buffer" object with the
+ single-quotes around it... '''
+ return str(psycopg.QuotedString(str(value)))[1:-1]
+
+ def sql_index_exists(self, table_name, index_name):
+ sql = 'select count(*) from pg_indexes where ' \
+ 'tablename=%s and indexname=%s'%(self.arg, self.arg)
+ self.cursor.execute(sql, (table_name, index_name))
+ return self.cursor.fetchone()[0]
def save_dbschema(self, schema):
s = repr(self.database_schema)
self.cursor.execute(sql)
- # Static methods
- nuke = Maintenance().db_nuke
- exists = Maintenance().db_exists
-
class PsycopgClass:
def find(self, **propspec):
"""Get the ids of nodes in this class which link to the given nodes."""
if type(values) is type(''):
allvalues += (values,)
where.append('_%s = %s' % (prop, a))
+ elif values is None:
+ where.append('_%s is NULL'%prop)
else:
allvalues += tuple(values.keys())
where.append('_%s in (%s)' % (prop, ','.join([a]*len(values))))
index d0091b6828e758c3a30370b06531fd52d9070f27..25c69b9296ae1ea6b02501974c67f91e43889d09 100644 (file)
-# $Id: back_sqlite.py,v 1.10 2003-10-07 07:17:54 anthonybaxter Exp $
+# $Id: back_sqlite.py,v 1.11 2003-11-11 11:19:18 richard Exp $
__doc__ = '''
See https://pysqlite.sourceforge.net/ for pysqlite info
'''
if str(error) != 'cannot commit - no transaction is active':
raise
+ def sql_index_exists(self, table_name, index_name):
+ self.cursor.execute('pragma index_list(%s)'%table_name)
+ for entry in self.cursor.fetchall():
+ if entry[1] == index_name:
+ return 1
+ return 0
+
def save_dbschema(self, schema):
''' Save the schema definition that the database currently implements
'''
index 4c5760cdc56d4f3cb4594cb3c8a6ea0395460fc4..36eb94bf195bc4fec40d0cecc3f2154451b59427 100644 (file)
-# $Id: rdbms_common.py,v 1.66 2003-10-25 22:53:26 richard Exp $
+# $Id: rdbms_common.py,v 1.67 2003-11-11 11:19:18 richard Exp $
''' Relational database (SQL) backend common code.
Basics:
def update_class(self, spec, old_spec, force=0):
''' Determine the differences between the current spec and the
database version of the spec, and update where necessary.
+
If 'force' is true, update the database anyway.
'''
new_has = spec.properties.has_key
old_has = {}
for name,prop in old_spec[1]:
old_has[name] = 1
- if (force or not new_has(name)) and isinstance(prop, Multilink):
- # it's a multilink, and it's been removed - drop the old
- # table. First drop indexes.
- index_sqls = [ 'drop index %s_%s_l_idx'%(spec.classname, ml),
- 'drop index %s_%s_n_idx'%(spec.classname, ml) ]
- for index_sql in index_sqls:
- if __debug__:
- print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
- try:
- self.cursor.execute(index_sql)
- except:
- # The database may not actually have any indexes.
- # assume the worst.
- pass
- sql = 'drop table %s_%s'%(spec.classname, prop)
- if __debug__:
- print >>hyperdb.DEBUG, 'update_class', (self, sql)
- self.cursor.execute(sql)
+ if new_has(name) or not isinstance(prop, Multilink):
continue
+ # it's a multilink, and it's been removed - drop the old
+ # table. First drop indexes.
+ self.drop_multilink_table_indexes(spec.classname, ml)
+ sql = 'drop table %s_%s'%(spec.classname, prop)
+ if __debug__:
+ print >>hyperdb.DEBUG, 'update_class', (self, sql)
+ self.cursor.execute(sql)
old_has = old_has.has_key
# now figure how we populate the new table
self.cursor.execute(sql)
olddata = self.cursor.fetchall()
- # drop the old table indexes first
- index_sqls = [ 'drop index _%s_id_idx'%cn,
- 'drop index _%s_retired_idx'%cn ]
- if old_spec[0]:
- index_sqls.append('drop index _%s_%s_idx'%(cn, old_spec[0]))
- for index_sql in index_sqls:
- if __debug__:
- print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
- try:
- self.cursor.execute(index_sql)
- except:
- # The database may not actually have any indexes.
- # assume the worst.
- pass
+ # TODO: update all the other index dropping code
+ self.drop_class_table_indexes(cn, old_spec[0])
# drop the old table
self.cursor.execute('drop table _%s'%cn)
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ self.create_class_table_indexes(spec)
+
+ return cols, mls
+
+ def create_class_table_indexes(self, spec):
+ ''' create the class table for the given spec
+ '''
# create id index
index_sql1 = 'create index _%s_id_idx on _%s(id)'%(
spec.classname, spec.classname)
print >>hyperdb.DEBUG, 'create_index', (self, index_sql3)
self.cursor.execute(index_sql3)
- return cols, mls
+ def drop_class_table_indexes(self, cn, key):
+ # drop the old table indexes first
+ l = ['_%s_id_idx'%cn, '_%s_retired_idx'%cn]
+ if key:
+ # key prop too?
+ l.append('_%s_%s_idx'%(cn, key))
+
+ # TODO: update all the other index dropping code
+ table_name = '_%s'%cn
+ for index_name in l:
+ if not self.sql_index_exists(table_name, index_name):
+ continue
+ index_sql = 'drop index '+index_name
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ self.cursor.execute(index_sql)
def create_journal_table(self, spec):
''' create the journal table for a class given the spec and
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ self.create_journal_table_indexes(spec)
+ def create_journal_table_indexes(self, spec):
# index on nodeid
index_sql = 'create index %s_journ_idx on %s__journal(nodeid)'%(
spec.classname, spec.classname)
print >>hyperdb.DEBUG, 'create_index', (self, index_sql)
self.cursor.execute(index_sql)
+ def drop_journal_table_indexes(self, classname):
+ index_name = '%s_journ_idx'%classname
+ if not self.sql_index_exists('%s__journal'%classname, index_name):
+ return
+ index_sql = 'drop index '+index_name
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ self.cursor.execute(index_sql)
+
def create_multilink_table(self, spec, ml):
''' Create a multilink table for the "ml" property of the class
given by the spec
if __debug__:
print >>hyperdb.DEBUG, 'create_class', (self, sql)
self.cursor.execute(sql)
+ self.create_multilink_table_indexes(spec, ml)
+ def create_multilink_table_indexes(self, spec, ml):
# create index on linkid
index_sql = 'create index %s_%s_l_idx on %s_%s(linkid)'%(
spec.classname, ml, spec.classname, ml)
print >>hyperdb.DEBUG, 'create_index', (self, index_sql)
self.cursor.execute(index_sql)
+ def drop_multilink_table_indexes(self, classname, ml):
+ l = [
+ '%s_%s_l_idx'%(classname, ml),
+ '%s_%s_n_idx'%(classname, ml)
+ ]
+ table_name = '%s_%s'%(classname, ml)
+ for index_name in l:
+ if not self.sql_index_exists(table_name, index_name):
+ continue
+ index_sql = 'drop index %s'%index_name
+ if __debug__:
+ print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
+ self.cursor.execute(index_sql)
+
def create_class(self, spec):
''' Create a database table according to the given spec.
'''
if isinstance(prop, Multilink):
mls.append(propname)
- index_sqls = [ 'drop index _%s_id_idx'%cn,
- 'drop index _%s_retired_idx'%cn,
- 'drop index %s_journ_idx'%cn ]
- if spec[0]:
- index_sqls.append('drop index _%s_%s_idx'%(cn, spec[0]))
- for index_sql in index_sqls:
- if __debug__:
- print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
- try:
- self.cursor.execute(index_sql)
- except:
- # The database may not actually have any indexes.
- # assume the worst.
- pass
-
+ # drop class table and indexes
+ self.drop_class_table_indexes(cn, spec[0])
sql = 'drop table _%s'%cn
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
self.cursor.execute(sql)
+ # drop journal table and indexes
+ self.drop_journal_table_indexes(cn)
sql = 'drop table %s__journal'%cn
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
self.cursor.execute(sql)
for ml in mls:
- index_sqls = [
- 'drop index %s_%s_n_idx'%(cn, ml),
- 'drop index %s_%s_l_idx'%(cn, ml),
- ]
- for index_sql in index_sqls:
- if __debug__:
- print >>hyperdb.DEBUG, 'drop_index', (self, index_sql)
- try:
- self.cursor.execute(index_sql)
- except:
- # The database may not actually have any indexes.
- # assume the worst.
- pass
+ # drop multilink table and indexes
+ self.drop_multilink_table_indexes(cn, ml)
sql = 'drop table %s_%s'%(spec.classname, ml)
if __debug__:
print >>hyperdb.DEBUG, 'drop_class', (self, sql)
for col in mls:
# get the link ids
sql = 'delete from %s_%s where nodeid=%s'%(classname, col, self.arg)
- self.cursor.execute(sql, (nodeid,))
+ self.sql(sql, (nodeid,))
# remove journal entries
sql = 'delete from %s__journal where nodeid=%s'%(classname, self.arg)
args = tuple(args)
if __debug__:
print >>hyperdb.DEBUG, 'filter', (self, sql, args)
- self.db.cursor.execute(sql, args)
+ if args:
+ self.db.cursor.execute(sql, args)
+ else:
+ # psycopg doesn't like empty args
+ self.db.cursor.execute(sql)
l = self.db.cursor.fetchall()
# return the IDs (the first column)
diff --git a/test/db_test_base.py b/test/db_test_base.py
index 544db1a7db7d1a61dbd7a1873fdec1fdf3919532..01209546ecb8a3c40ac2509e35433586bb629d60 100644 (file)
--- a/test/db_test_base.py
+++ b/test/db_test_base.py
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-# $Id: db_test_base.py,v 1.5 2003-11-10 03:56:39 richard Exp $
+# $Id: db_test_base.py,v 1.6 2003-11-11 11:19:18 richard Exp $
import unittest, os, shutil, errno, imp, sys, time
MESSAGES_TO_AUTHOR = 'no' # either 'yes' or 'no'
EMAIL_SIGNATURE_POSITION = 'bottom'
- # Mysql connection data
- MYSQL_DBHOST = 'localhost'
- MYSQL_DBUSER = 'rounduptest'
- MYSQL_DBPASSWORD = 'rounduptest'
- MYSQL_DBNAME = 'rounduptest'
- MYSQL_DATABASE = (MYSQL_DBHOST, MYSQL_DBUSER, MYSQL_DBPASSWORD, MYSQL_DBNAME)
-
- # Postgresql connection data
- POSTGRESQL_DBHOST = 'localhost'
- POSTGRESQL_DBUSER = 'rounduptest'
- POSTGRESQL_DBPASSWORD = 'rounduptest'
- POSTGRESQL_DBNAME = 'rounduptest'
- POSTGRESQL_PORT = 5432
- POSTGRESQL_DATABASE = {'host': POSTGRESQL_DBHOST, 'port': POSTGRESQL_PORT,
- 'user': POSTGRESQL_DBUSER, 'password': POSTGRESQL_DBPASSWORD,
- 'database': POSTGRESQL_DBNAME}
-
-class nodbconfig(config):
- MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER, config.MYSQL_DBPASSWORD)
class DBTest(MyTestCase):
def setUp(self):
diff --git a/test/test_mysql.py b/test/test_mysql.py
index fe045f6bdd55d74070fb5f3e3b7175a01897c1de..724eefc408f40661d081321d6a74be74222ce1ba 100644 (file)
--- a/test/test_mysql.py
+++ b/test/test_mysql.py
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-# $Id: test_mysql.py,v 1.4 2003-11-05 21:54:57 jlgijsbers Exp $
+# $Id: test_mysql.py,v 1.5 2003-11-11 11:19:18 richard Exp $
import unittest, os, shutil, time, imp
from roundup.hyperdb import DatabaseError
from roundup import init, backends
-from db_test_base import DBTest, ROTest, config, SchemaTest, nodbconfig, \
- ClassicInitTest
+from db_test_base import DBTest, ROTest, config, SchemaTest, ClassicInitTest
+
+
+# Mysql connection data
+config.MYSQL_DBHOST = 'localhost'
+config.MYSQL_DBUSER = 'rounduptest'
+config.MYSQL_DBPASSWORD = 'rounduptest'
+config.MYSQL_DBNAME = 'rounduptest'
+config.MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER,
+ config.MYSQL_DBPASSWORD, config.MYSQL_DBNAME)
+
+class nodbconfig(config):
+ MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER, config.MYSQL_DBPASSWORD)
class mysqlOpener:
if hasattr(backends, 'mysql'):
try:
# Check if we can run mysql tests
import MySQLdb
- db = mysql.Database(nodbconfig, 'admin')
+ db = mysql.Database(config, 'admin')
db.conn.select_db(config.MYSQL_DBNAME)
db.sql("SHOW TABLES");
tables = db.sql_fetchall()
+ # TODO: reinstate the check here
if 0: #tables:
# Database should be empty. We don't dare to delete any data
raise DatabaseError, "Database %s contains tables"%\
index f96663a847979a98b27ccb6c690c23205a7b8240..c7a957d0f9950b561c0edd37c4e06157e4cccc2e 100644 (file)
--- a/test/test_postgresql.py
+++ b/test/test_postgresql.py
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-# $Id: test_postgresql.py,v 1.2 2003-10-26 14:43:51 jlgijsbers Exp $
+# $Id: test_postgresql.py,v 1.3 2003-11-11 11:19:18 richard Exp $
-import unittest, os, shutil, time
+import sys, unittest, os, shutil, time, popen2
from roundup.hyperdb import DatabaseError
-from db_test_base import DBTest, ROTest, config, SchemaTest, nodbconfig, \
- ClassicInitTest
+from db_test_base import DBTest, ROTest, config, SchemaTest, ClassicInitTest
+
+# Postgresql connection data
+# NOTE: THIS MUST BE A LOCAL DATABASE
+config.POSTGRESQL_DATABASE = {'database': 'rounduptest'}
from roundup import backends
+def db_create():
+ """Clear all database contents and drop database itself"""
+ name = config.POSTGRESQL_DATABASE['database']
+ cout,cin = popen2.popen4('createdb %s'%name)
+ cin.close()
+ response = cout.read().split('\n')[0]
+ if response.find('FATAL') != -1 or response.find('ERROR') != -1:
+ raise RuntimeError, response
+
+def db_nuke(fail_ok=0):
+ """Clear all database contents and drop database itself"""
+ name = config.POSTGRESQL_DATABASE['database']
+ cout,cin = popen2.popen4('dropdb %s'%name)
+ cin.close()
+ response = cout.read().split('\n')[0]
+ if response.endswith('does not exist') and fail_ok:
+ return
+ if response.find('FATAL') != -1 or response.find('ERROR') != -1:
+ raise RuntimeError, response
+ if os.path.exists(config.DATABASE):
+ shutil.rmtree(config.DATABASE)
+
+def db_exists(config):
+ """Check if database already exists"""
+ try:
+ db = Database(config, 'admin')
+ return 1
+ except:
+ return 0
+
class postgresqlOpener:
if hasattr(backends, 'postgresql'):
from roundup.backends import postgresql as module
+ def setUp(self):
+ db_nuke(1)
+ db_create()
+
def tearDown(self):
- self.db.close()
- self.module.Database.nuke(config)
+ db_nuke()
class postgresqlDBTest(postgresqlOpener, DBTest):
- pass
+ def setUp(self):
+ postgresqlOpener.setUp(self)
+ DBTest.setUp(self)
+
+ def tearDown(self):
+ DBTest.tearDown(self)
+ postgresqlOpener.tearDown(self)
+
+ def testFilteringIntervalSort(self):
+ # PostgreSQL sorts NULLs differently to other databases (others
+ # treat it as lower than real values, PG treats it as higher)
+ ae, filt = self.filteringSetup()
+ # ascending should sort None, 1:10, 1d
+ ae(filt(None, {}, ('+','foo'), (None,None)), ['4', '1', '2', '3'])
+ # descending should sort 1d, 1:10, None
+ ae(filt(None, {}, ('-','foo'), (None,None)), ['3', '2', '1', '4'])
class postgresqlROTest(postgresqlOpener, ROTest):
- pass
+ def setUp(self):
+ postgresqlOpener.setUp(self)
+ ROTest.setUp(self)
+
+ def tearDown(self):
+ ROTest.tearDown(self)
+ postgresqlOpener.tearDown(self)
class postgresqlSchemaTest(postgresqlOpener, SchemaTest):
- pass
+ def setUp(self):
+ postgresqlOpener.setUp(self)
+ SchemaTest.setUp(self)
+
+ def tearDown(self):
+ SchemaTest.tearDown(self)
+ postgresqlOpener.tearDown(self)
-class postgresqlClassicInitTest(ClassicInitTest):
+class postgresqlClassicInitTest(postgresqlOpener, ClassicInitTest):
backend = 'postgresql'
+ extra_config = "POSTGRESQL_DATABASE = {'database': 'rounduptest'}"
+ def setUp(self):
+ postgresqlOpener.setUp(self)
+ ClassicInitTest.setUp(self)
+
+ def tearDown(self):
+ ClassicInitTest.tearDown(self)
+ postgresqlOpener.tearDown(self)
def test_suite():
suite = unittest.TestSuite()
if not hasattr(backends, 'postgresql'):
return suite
- from roundup.backends import postgresql
- try:
- # Check if we can run postgresql tests
- import psycopg
- db = postgresql.Database(nodbconfig, 'admin')
- db.conn.select_db(config.POSTGRESQL_DBNAME)
- db.sql("SHOW TABLES");
- tables = db.sql_fetchall()
- if tables:
- # Database should be empty. We don't dare to delete any data
- raise DatabaseError, "(Database %s contains tables)"%\
- config.POSTGRESQL_DBNAME
- db.sql("DROP DATABASE %s" % config.POSTGRESQL_DBNAME)
- db.sql("CREATE DATABASE %s" % config.POSTGRESQL_DBNAME)
- db.close()
- except (psycopg.ProgrammingError, DatabaseError), msg:
- print "Skipping postgresql tests (%s)"%msg
- else:
- print 'Including postgresql tests'
- suite.addTest(unittest.makeSuite(postgresqlDBTest))
- suite.addTest(unittest.makeSuite(postgresqlROTest))
- suite.addTest(unittest.makeSuite(postgresqlSchemaTest))
- suite.addTest(unittest.makeSuite(postgresqlClassicInitTest))
+ # Check if we can run postgresql tests
+ print 'Including postgresql tests'
+ suite.addTest(unittest.makeSuite(postgresqlDBTest))
+ suite.addTest(unittest.makeSuite(postgresqlROTest))
+ suite.addTest(unittest.makeSuite(postgresqlSchemaTest))
+ suite.addTest(unittest.makeSuite(postgresqlClassicInitTest))
return suite