From 2b4148975a919b7895d86222cd85692c60a1d82e Mon Sep 17 00:00:00 2001
From: richard
Date: Tue, 7 Oct 2003 08:52:12 +0000
Subject: [PATCH] - Fix a couple of failures in mysql backend unit tests.

- Fix the mysql backend docs
- Note requirement of MySQL version 4.0+ with InnoDB or BDB mysql backend

git-svn-id: http://svn.roundup-tracker.org/svnroot/roundup/trunk@1906 57a73879-2fb5-44c3-a270-3262357dd7e2
---
 CHANGES.txt                    |   4 +
 doc/Makefile                   |   2 +-
 doc/installation.txt           |   8 +-
 doc/mysql.txt                  |  45 +++---
 roundup/backends/back_mysql.py | 286 +++++++++++++++++++++++++--------
 test/test_db.py                |  73 ++++++++-
 6 files changed, 322 insertions(+), 96 deletions(-)

diff --git a/CHANGES.txt b/CHANGES.txt
index 007759f..1c187f3 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -4,6 +4,10 @@ are given with the most recent entry first.
 200?-??-?? 0.7.0
 Feature:
 - support confirming registration by replying to the email (sf bug 763668)
+- support setgid and running on port < 1024 (sf patch 777528)
+
+Fixed:
+- mysql documentation fixed to note requirement of 4.0+ and InnoDB
 
 
 2003-10-?? 0.6.3
diff --git a/doc/Makefile b/doc/Makefile
index 5322ca0..8cb79fe 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -2,7 +2,7 @@ PYTHON = /usr/bin/python2
 STXTOHTML = rst2html
 SOURCE = announcement.txt customizing.txt developers.txt FAQ.txt features.txt \
-	glossary.txt implementation.txt index.txt design.txt \
+	glossary.txt implementation.txt index.txt design.txt mysql.txt \
 	installation.txt upgrading.txt user_guide.txt maintenance.txt
 
 COMPILED := $(SOURCE:.txt=.html)
diff --git a/doc/installation.txt b/doc/installation.txt
index 17442e4..7054bda 100644
--- a/doc/installation.txt
+++ b/doc/installation.txt
@@ -2,7 +2,7 @@
 Installing Roundup
 ==================
 
-:Version: $Revision: 1.58 $
+:Version: $Revision: 1.59 $
 
 .. contents::
 
@@ -244,8 +244,9 @@ There's several to choose from, each with benefits and limitations:
 **mysql**
   Backend for popular RDBMS MySQL. According to benchmarks, this backend works
   much faster than any of *dbm ones, but slightly slower than metakit and
-  sqlite. A good scalability is not a property of this backend for now,
-  though. For more info on backend installation see doc/mysql.txt.
+  sqlite. Scalability is unknown at present. Roundup requires InnoDB or BDB
+  support in MySQL. For more info on requirements and installation see
+  `additional mysql documentation`_ (doc/mysql.txt)
 **metakit**
   This backend is implemented over the metakit_ storage system, using Mk4Py as
   the interface. It scales much better than the dbm backends.
@@ -584,4 +585,5 @@ Next: `User Guide`_
 .. _`maintenance documentation`: maintenance.html
 .. _sqlite: http://www.hwaci.com/sw/sqlite/
 .. _metakit: http://www.equi4.com/metakit/
+.. _`additional mysql documentation`: doc/mysql.html
 
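
The InnoDB/BDB requirement added to doc/installation.txt above can be checked
against a running server before the mysql backend is chosen. A minimal sketch
using MySQLdb (the Python interface the backend needs); the connection values
are placeholders for your own server, and ``have_innodb``/``have_bdb`` are the
variables MySQL 3.23/4.0 servers report::

    # Check whether the MySQL server offers the table types the backend needs.
    # Sketch only - host/user/passwd below are placeholders.
    import MySQLdb

    conn = MySQLdb.connect(host='localhost', user='root', passwd='')
    cursor = conn.cursor()
    for variable in ('have_innodb', 'have_bdb'):
        cursor.execute("SHOW VARIABLES LIKE %s", (variable,))
        row = cursor.fetchone()
        print variable, '->', row and row[1] or 'not reported'
    conn.close()
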
diff --git a/doc/mysql.txt b/doc/mysql.txt
index 980a2e1..13fdb47 100644
--- a/doc/mysql.txt
+++ b/doc/mysql.txt
@@ -2,7 +2,9 @@
 MySQL Backend
 =============
 
-This notes detail the MySQL backend for the roundup issue tracker.
+:version: $Revision: 1.5 $
+
+These notes detail the MySQL backend for the Roundup issue tracker.
 
 
 Prerequisites
@@ -11,10 +13,10 @@ Prerequisites
 To use MySQL as the backend for storing roundup data, you also need to
 install:
 
-  1. MySQL RDBMS 3.23.34 or higher - http://www.mysql.com. Your MySQL
-     installation should support Berkeley DB (BDB) tables for transaction
-     support.
-  2. Python MySQL interface - http://sourceforge.net/projects/mysql-python
+1. MySQL RDBMS 4.0 or higher - http://www.mysql.com. Your MySQL
+   installation MUST support InnoDB tables (or Berkeley DB (BDB) tables
+   if you have no other choice)
+2. Python MySQL interface - http://sourceforge.net/projects/mysql-python
 
 Running the MySQL tests
 =======================
@@ -23,16 +25,19 @@ Running the MySQL tests
 Roundup tests expect an empty MySQL database. Two alternate ways to provide
 this:
 
-  1. If you have root permissions on the MySQL server, you can create
-     the necessary database entries using this SQL sequence:
+1. If you have root permissions on the MySQL server, you can create
+   the necessary database entries using the following SQL sequence. Use
+   ``mysql`` on the command line to enter::
 
-     CREATE DATABASE rounduptest
-     GRANT ALL PRIVILEGES ON rounduptest TO rounduptest@localhost IDENTIFIED BY 'rounduptest'
-     FLUSH PRIVILEGES
+       CREATE DATABASE rounduptest;
+       USE rounduptest;
+       GRANT ALL PRIVILEGES ON rounduptest.* TO rounduptest@localhost
+            IDENTIFIED BY 'rounduptest';
+       FLUSH PRIVILEGES;
 
-  2. If your administrator has provided you with database connection info,
-     you can modify MYSQL_* constants in the file test/test_db.py with
-     the correct values.
+2. If your administrator has provided you with database connection info,
+   you can modify MYSQL_* constants in the file test/test_db.py with
+   the correct values.
 
 Note that the MySQL database should not contain any tables. Tests will not
 drop the database with existing data.
@@ -43,19 +48,17 @@ Additional configuration
 ========================
 
 To initialise and use the MySQL database backend, roundup's configuration file
 (config.py in the tracker's home directory) should have the following
-entries:
+entries::
 
     MYSQL_DBHOST = 'localhost'
     MYSQL_DBUSER = 'rounduptest'
     MYSQL_DBPASSWORD = 'rounduptest'
     MYSQL_DBNAME = 'rounduptest'
-    MYSQL_DATABASE = ( MYSQL_DBHOST, MYSQL_DBUSER, MYSQL_DBPASSWORD, MYSQL_DBNAME )
+    MYSQL_DATABASE = ( MYSQL_DBHOST, MYSQL_DBUSER, MYSQL_DBPASSWORD,
+        MYSQL_DBNAME )
 
 Fill in the first four entries with values for your local MySQL installation
-before running "roundup-admin initialise".
-
-
- - Andrey Lebedev
-
+before running "roundup-admin initialise". Use the commands in the `Running the
+MySQL tests`_ section above to set up a database if you have the required
+privileges, or ask your local administrator if not.
 
 - vim: et tw=80
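
The test database described above can be sanity-checked from Python before the
suite is run. A rough sketch, assuming the rounduptest account created by the
GRANT statement and mirroring the "must be empty" check the tests perform::

    # Verify that the rounduptest database is reachable and still empty.
    # Sketch only - adjust the connection values to your MySQL server.
    import MySQLdb

    conn = MySQLdb.connect(host='localhost', user='rounduptest',
        passwd='rounduptest', db='rounduptest')
    cursor = conn.cursor()
    cursor.execute("SHOW TABLES")
    tables = cursor.fetchall()
    if tables:
        print "rounduptest is not empty - the tests will refuse to run:"
        for (name,) in tables:
            print ' ', name
    else:
        print "rounduptest is empty - OK to run the MySQL tests"
    conn.close()
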
diff --git a/roundup/backends/back_mysql.py b/roundup/backends/back_mysql.py
index 0e1d085..130327b 100644
--- a/roundup/backends/back_mysql.py
+++ b/roundup/backends/back_mysql.py
@@ -19,19 +19,22 @@ class Maintenance:
     def db_nuke(self, config):
         """Clear all database contents and drop database itself"""
         db = Database(config, 'admin')
+        db.sql_commit()
         db.sql("DROP DATABASE %s" % config.MYSQL_DBNAME)
         db.sql("CREATE DATABASE %s" % config.MYSQL_DBNAME)
         if os.path.exists(config.DATABASE):
             shutil.rmtree(config.DATABASE)
-    
+
     def db_exists(self, config):
         """Check if database already exists"""
         # Yes, this is a hack, but we must must open connection without
         # selecting a database to prevent creation of some tables
-        config.MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER, config.MYSQL_DBPASSWORD)
+        config.MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER,
+            config.MYSQL_DBPASSWORD)
         db = Database(config, 'admin')
         db.conn.select_db(config.MYSQL_DBNAME)
-        config.MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER, config.MYSQL_DBPASSWORD, config.MYSQL_DBNAME)
+        config.MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER,
+            config.MYSQL_DBPASSWORD, config.MYSQL_DBNAME)
         db.sql("SHOW TABLES")
         tables = db.sql_fetchall()
         if tables or os.path.exists(config.DATABASE):
@@ -40,6 +43,10 @@ class Maintenance:
 
 class Database(Database):
     arg = '%s'
+
+    # backend for MySQL to use
+    mysql_backend = 'InnoDB'
+    #mysql_backend = 'BDB'   # much slower, only use if you have no choice
 
     def open_connection(self):
         db = getattr(self.config, 'MYSQL_DATABASE')
@@ -61,15 +68,16 @@ class Database(Database):
             if message[0] != ER.NO_SUCH_TABLE:
                 raise DatabaseError, message
             self.database_schema = {}
-            self.sql("CREATE TABLE schema (schema TEXT) TYPE=BDB")
-            self.sql("CREATE TABLE ids (name varchar(255), num INT) TYPE=BDB")
+            self.sql("CREATE TABLE schema (schema TEXT) TYPE=%s"%
+                self.mysql_backend)
+            # TODO: use AUTO_INCREMENT for generating ids:
+            #       http://www.mysql.com/doc/en/CREATE_TABLE.html
+            self.sql("CREATE TABLE ids (name varchar(255), num INT) TYPE=%s"%
+                self.mysql_backend)
             self.sql("CREATE INDEX ids_name_idx on ids(name)")
-    
+
     def close(self):
-        try:
-            self.conn.close()
-        except MySQLdb.OperationalError, message:
-            raise
+        self.conn.close()
 
     def __repr__(self):
         return ''%id(self)
@@ -120,7 +128,8 @@ class Database(Database):
         cols.append('id')
         cols.append('__retired__')
         scols = ',' . join(['`%s` VARCHAR(255)'%x for x in cols])
-        sql = 'CREATE TABLE `_%s` (%s) TYPE=BDB'%(spec.classname, scols)
+        sql = 'CREATE TABLE `_%s` (%s) TYPE=%s'%(spec.classname, scols,
+            self.mysql_backend)
         if __debug__:
             print >>hyperdb.DEBUG, 'create_class', (self, sql)
         self.cursor.execute(sql)
@@ -129,14 +138,16 @@ class Database(Database):
     def create_journal_table(self, spec):
         cols = ',' . join(['`%s` VARCHAR(255)'%x for x in
            'nodeid date tag action params' . split()])
-        sql = 'CREATE TABLE `%s__journal` (%s) TYPE=BDB'%(spec.classname, cols)
+        sql = 'CREATE TABLE `%s__journal` (%s) TYPE=%s'%(spec.classname,
+            cols, self.mysql_backend)
         if __debug__:
             print >>hyperdb.DEBUG, 'create_class', (self, sql)
         self.cursor.execute(sql)
 
     def create_multilink_table(self, spec, ml):
         sql = '''CREATE TABLE `%s_%s` (linkid VARCHAR(255),
-            nodeid VARCHAR(255)) TYPE=BDB'''%(spec.classname, ml)
+            nodeid VARCHAR(255)) TYPE=%s'''%(spec.classname, ml,
+            self.mysql_backend)
         if __debug__:
             print >>hyperdb.DEBUG, 'create_class', (self, sql)
         self.cursor.execute(sql)
@@ -146,73 +157,212 @@ class Database(Database):
     exists = Maintenance().db_exists
 
 class MysqlClass:
-    def find(self, **propspec):
-        '''Get the ids of nodes in this class which link to the given nodes.
+    # we're overriding this method for ONE missing bit of functionality.
+    # look for "I can't believe it's not a toy RDBMS" below
+    def filter(self, search_matches, filterspec, sort=(None,None),
+            group=(None,None)):
+        ''' Return a list of the ids of the active nodes in this class that
+            match the 'filter' spec, sorted by the group spec and then the
+            sort spec
 
-        Since MySQL < 4.0.0 does not support unions, so we overrideg this
-        method without using this keyword
+            "filterspec" is {propname: value(s)}
+            "sort" and "group" are (dir, prop) where dir is '+', '-' or None
+               and prop is a prop name or None
+            "search_matches" is {nodeid: marker}
+            The filter must match all properties specified - but if the
+            property value to match is a list, any one of the values in the
+            list may match for that property to match.
        '''
-        if __debug__:
-            print >>hyperdb.DEBUG, 'find', (self, propspec)
-
-        # shortcut
-        if not propspec:
+        # just don't bother if the full-text search matched diddly
+        if search_matches == {}:
             return []
 
-        # validate the args
+        cn = self.classname
+
+        timezone = self.db.getUserTimezone()
+
+        # figure the WHERE clause from the filterspec
         props = self.getprops()
-        propspec = propspec.items()
-        for propname, nodeids in propspec:
-            # check the prop is OK
-            prop = props[propname]
-            if not isinstance(prop, Link) and not isinstance(prop, Multilink):
-                raise TypeError, "'%s' not a Link/Multilink property"%propname
-
-        # first, links
-        l = []
+        frum = ['_'+cn]
         where = []
-        allvalues = ()
+        args = []
         a = self.db.arg
-        for prop, values in propspec:
-            if not isinstance(props[prop], hyperdb.Link):
-                continue
-            if type(values) is type(''):
-                allvalues += (values,)
-                where.append('_%s = %s'%(prop, a))
+        for k, v in filterspec.items():
+            propclass = props[k]
+            # now do other where clause stuff
+            if isinstance(propclass, Multilink):
+                tn = '%s_%s'%(cn, k)
+                if v in ('-1', ['-1']):
+                    # only match rows that have count(linkid)=0 in the
+                    # corresponding multilink table)
+
+                    # "I can't believe it's not a toy RDBMS"
+                    # see, even toy RDBMSes like gadfly and sqlite can do
+                    # sub-selects...
+                    self.db.sql('select nodeid from %s'%tn)
+                    s = ','.join([x[0] for x in self.db.sql_fetchall()])
+
+                    where.append('id not in (%s)'%s)
+                elif isinstance(v, type([])):
+                    frum.append(tn)
+                    s = ','.join([a for x in v])
+                    where.append('id=%s.nodeid and %s.linkid in (%s)'%(tn,tn,s))
+                    args = args + v
+                else:
+                    frum.append(tn)
+                    where.append('id=%s.nodeid and %s.linkid=%s'%(tn, tn, a))
+                    args.append(v)
+            elif k == 'id':
+                if isinstance(v, type([])):
+                    s = ','.join([a for x in v])
+                    where.append('%s in (%s)'%(k, s))
+                    args = args + v
+                else:
+                    where.append('%s=%s'%(k, a))
+                    args.append(v)
+            elif isinstance(propclass, String):
+                if not isinstance(v, type([])):
+                    v = [v]
+
+                # Quote the bits in the string that need it and then embed
+                # in a "substring" search. Note - need to quote the '%' so
+                # they make it through the python layer happily
+                v = ['%%'+self.db.sql_stringquote(s)+'%%' for s in v]
+
+                # now add to the where clause
+                where.append(' or '.join(["_%s LIKE '%s'"%(k, s) for s in v]))
+                # note: args are embedded in the query string now
+            elif isinstance(propclass, Link):
+                if isinstance(v, type([])):
+                    if '-1' in v:
+                        v = v[:]
+                        v.remove('-1')
+                        xtra = ' or _%s is NULL'%k
+                    else:
+                        xtra = ''
+                    if v:
+                        s = ','.join([a for x in v])
+                        where.append('(_%s in (%s)%s)'%(k, s, xtra))
+                        args = args + v
+                    else:
+                        where.append('_%s is NULL'%k)
+                else:
+                    if v == '-1':
+                        v = None
+                        where.append('_%s is NULL'%k)
+                    else:
+                        where.append('_%s=%s'%(k, a))
+                        args.append(v)
+            elif isinstance(propclass, Date):
+                if isinstance(v, type([])):
+                    s = ','.join([a for x in v])
+                    where.append('_%s in (%s)'%(k, s))
+                    args = args + [date.Date(x).serialise() for x in v]
+                else:
+                    try:
+                        # Try to filter on range of dates
+                        date_rng = Range(v, date.Date, offset=timezone)
+                        if (date_rng.from_value):
+                            where.append('_%s >= %s'%(k, a))
+                            args.append(date_rng.from_value.serialise())
+                        if (date_rng.to_value):
+                            where.append('_%s <= %s'%(k, a))
+                            args.append(date_rng.to_value.serialise())
+                    except ValueError:
+                        # If range creation fails - ignore that search parameter
+                        pass
+            elif isinstance(propclass, Interval):
+                if isinstance(v, type([])):
+                    s = ','.join([a for x in v])
+                    where.append('_%s in (%s)'%(k, s))
+                    args = args + [date.Interval(x).serialise() for x in v]
+                else:
+                    try:
+                        # Try to filter on range of intervals
+                        date_rng = Range(v, date.Interval)
+                        if (date_rng.from_value):
+                            where.append('_%s >= %s'%(k, a))
+                            args.append(date_rng.from_value.serialise())
+                        if (date_rng.to_value):
+                            where.append('_%s <= %s'%(k, a))
+                            args.append(date_rng.to_value.serialise())
+                    except ValueError:
+                        # If range creation fails - ignore that search parameter
+                        pass
+                #where.append('_%s=%s'%(k, a))
+                #args.append(date.Interval(v).serialise())
             else:
-                allvalues += tuple(values.keys())
-                where.append('_%s in (%s)'%(prop, ','.join([a]*len(values))))
-        tables = []
-        if where:
-            self.db.sql('select id as nodeid from _%s where %s' % (self.classname, ' and '.join(where)), allvalues)
-            l += [x[0] for x in self.db.sql_fetchall()]
-
-        # now multilinks
-        for prop, values in propspec:
-            vals = ()
-            if not isinstance(props[prop], hyperdb.Multilink):
-                continue
-            if type(values) is type(''):
-                vals = (values,)
-                s = a
+                if isinstance(v, type([])):
+                    s = ','.join([a for x in v])
+                    where.append('_%s in (%s)'%(k, s))
+                    args = args + v
+                else:
+                    where.append('_%s=%s'%(k, a))
+                    args.append(v)
+
+        # don't match retired nodes
+        where.append('__retired__ <> 1')
+
+        # add results of full text search
+        if search_matches is not None:
+            v = search_matches.keys()
+            s = ','.join([a for x in v])
+            where.append('id in (%s)'%s)
+            args = args + v
+
+        # "grouping" is just the first-order sorting in the SQL fetch
+        # can modify it...)
+        orderby = []
+        ordercols = []
+        if group[0] is not None and group[1] is not None:
+            if group[0] != '-':
+                orderby.append('_'+group[1])
+                ordercols.append('_'+group[1])
             else:
-                vals = tuple(values.keys())
-                s = ','.join([a]*len(values))
-            query = 'select nodeid from %s_%s where linkid in (%s)'%(
-                self.classname, prop, s)
-            self.db.sql(query, vals)
-            l += [x[0] for x in self.db.sql_fetchall()]
-        if __debug__:
-            print >>hyperdb.DEBUG, 'find ... ', l
+                orderby.append('_'+group[1]+' desc')
+                ordercols.append('_'+group[1])
+
+        # now add in the sorting
+        group = ''
+        if sort[0] is not None and sort[1] is not None:
+            direction, colname = sort
+            if direction != '-':
+                if colname == 'id':
+                    orderby.append(colname)
+                else:
+                    orderby.append('_'+colname)
+                    ordercols.append('_'+colname)
+            else:
+                if colname == 'id':
+                    orderby.append(colname+' desc')
+                    ordercols.append(colname)
+                else:
+                    orderby.append('_'+colname+' desc')
+                    ordercols.append('_'+colname)
 
-        # Remove duplicated ids
-        d = {}
-        for k in l:
-            d[k] = 1
-        return d.keys()
+        # construct the SQL
+        frum = ','.join(frum)
+        if where:
+            where = ' where ' + (' and '.join(where))
+        else:
+            where = ''
+        cols = ['id']
+        if orderby:
+            cols = cols + ordercols
+            order = ' order by %s'%(','.join(orderby))
+        else:
+            order = ''
+        cols = ','.join(cols)
+        sql = 'select %s from %s %s%s%s'%(cols, frum, where, group, order)
+        args = tuple(args)
+        if __debug__:
+            print >>hyperdb.DEBUG, 'filter', (self, sql, args)
+        self.db.cursor.execute(sql, args)
+        l = self.db.cursor.fetchall()
 
-        return l
+        # return the IDs (the first column)
+        return [row[0] for row in l]
 
 class Class(MysqlClass, rdbms_common.Class):
     pass
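
The filter() override above follows one pattern throughout: one WHERE fragment
and one entry in the argument list per filterspec property, with multilink ids
fetched up front and embedded as ``id not in (...)`` because MySQL 4.0 has no
sub-selects. Below is a simplified, self-contained sketch of that pattern only;
the names ``build_filter_sql`` and ``multilink_ids`` are illustrative and not
part of the Roundup code::

    # Simplified sketch of the query-building idea used by filter() above,
    # not the Roundup implementation itself.
    def build_filter_sql(classname, filterspec, multilink_ids=None):
        frum = ['_' + classname]
        where = []
        args = []
        multilink_ids = multilink_ids or {}
        for prop, value in filterspec.items():
            if prop in multilink_ids:
                # MySQL 4.0 has no sub-selects, so the linked ids are fetched
                # beforehand and embedded directly, as in "id not in (%s)".
                where.append('id not in (%s)' % ','.join(multilink_ids[prop]))
            elif isinstance(value, list):
                where.append('_%s in (%s)' % (prop,
                    ','.join(['%s'] * len(value))))
                args.extend(value)
            else:
                where.append('_%s=%s' % (prop, '%s'))
                args.append(value)
        # never return retired nodes
        where.append('__retired__ <> 1')
        sql = 'select id from %s where %s' % (','.join(frum),
            ' and '.join(where))
        return sql, tuple(args)

    if __name__ == '__main__':
        # "nosy: -1" stands for "no links"; the pre-fetched linked ids are
        # therefore excluded, mirroring the real code's behaviour.
        sql, args = build_filter_sql('issue',
            {'status': ['1', '2'], 'nosy': '-1'},
            multilink_ids={'nosy': ['3', '7']})
        print sql
        print args
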
diff --git a/test/test_db.py b/test/test_db.py
index 9b16434..8735f79 100644
--- a/test/test_db.py
+++ b/test/test_db.py
@@ -15,7 +15,7 @@
 # BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
 # SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
 #
-# $Id: test_db.py,v 1.91 2003-09-04 00:47:01 richard Exp $
+# $Id: test_db.py,v 1.92 2003-10-07 08:52:12 richard Exp $
 
 import unittest, os, shutil, time
 
@@ -65,6 +65,7 @@ class config:
     ANONYMOUS_REGISTER = 'deny'          # either 'deny' or 'allow'
     MESSAGES_TO_AUTHOR = 'no'            # either 'yes' or 'no'
     EMAIL_SIGNATURE_POSITION = 'bottom'
+
     # Mysql connection data
     MYSQL_DBHOST = 'localhost'
     MYSQL_DBUSER = 'rounduptest'
@@ -72,6 +73,16 @@ class config:
     MYSQL_DBNAME = 'rounduptest'
     MYSQL_DATABASE = (MYSQL_DBHOST, MYSQL_DBUSER, MYSQL_DBPASSWORD, MYSQL_DBNAME)
 
+    # Postgresql connection data
+    POSTGRESQL_DBHOST = 'localhost'
+    POSTGRESQL_DBUSER = 'rounduptest'
+    POSTGRESQL_DBPASSWORD = 'rounduptest'
+    POSTGRESQL_DBNAME = 'rounduptest'
+    POSTGRESQL_PORT = 5432
+    POSTGRESQL_DATABASE = {'host': POSTGRESQL_DBHOST, 'port': POSTGRESQL_PORT,
+        'user': POSTGRESQL_DBUSER, 'password': POSTGRESQL_DBPASSWORD,
+        'database': POSTGRESQL_DBNAME}
+
 class nodbconfig(config):
     MYSQL_DATABASE = (config.MYSQL_DBHOST, config.MYSQL_DBUSER, config.MYSQL_DBPASSWORD)
 
@@ -842,6 +853,37 @@ class mysqlReadOnlyDBTestCase(anydbmReadOnlyDBTestCase):
         self.db.close()
         mysql.Database.nuke(config)
 
+class postgresqlDBTestCase(anydbmDBTestCase):
+    def setUp(self):
+        from roundup.backends import postgresql
+        # remove previous test, ignore errors
+        if os.path.exists(config.DATABASE):
+            shutil.rmtree(config.DATABASE)
+        os.makedirs(config.DATABASE + '/files')
+        # open database for testing
+        self.db = postgresql.Database(config, 'admin')
+        setupSchema(self.db, 1, mysql)
+
+    def tearDown(self):
+        from roundup.backends import postgresql
+        self.db.close()
+        postgresql.Database.nuke(config)
+
+class postgresqlReadOnlyDBTestCase(anydbmReadOnlyDBTestCase):
+    def setUp(self):
+        from roundup.backends import postgresql
+        # remove previous test, ignore errors
+        if os.path.exists(config.DATABASE):
+            shutil.rmtree(config.DATABASE)
+        os.makedirs(config.DATABASE + '/files')
+        self.db = postgresql.Database(config)
+        setupSchema(self.db, 0, mysql)
+
+    def tearDown(self):
+        from roundup.backends import postgresql
+        self.db.close()
+        postgresql.Database.nuke(config)
+
 class sqliteDBTestCase(anydbmDBTestCase):
     def setUp(self):
         from roundup.backends import sqlite
@@ -940,13 +982,14 @@ def suite():
     p = []
     l = [
-        unittest.makeSuite(anydbmDBTestCase, 'test'),
-        unittest.makeSuite(anydbmReadOnlyDBTestCase, 'test')
+        # unittest.makeSuite(anydbmDBTestCase, 'test'),
+        # unittest.makeSuite(anydbmReadOnlyDBTestCase, 'test')
     ]
     p.append('anydbm')
 
     # return unittest.TestSuite(l)
 
     from roundup import backends
+
     if hasattr(backends, 'mysql'):
         from roundup.backends import mysql
         try:
@@ -969,6 +1012,30 @@ def suite():
             p.append('mysql')
             l.append(unittest.makeSuite(mysqlDBTestCase, 'test'))
             l.append(unittest.makeSuite(mysqlReadOnlyDBTestCase, 'test'))
+#    return unittest.TestSuite(l)
+
+    if hasattr(backends, 'postgresql'):
+        from roundup.backends import postgresql
+        try:
+            # Check if we can run postgresql tests
+            import psycopg
+            db = psycopg.Database(nodbconfig, 'admin')
+            db.conn.select_db(config.POSTGRESQL_DBNAME)
+            db.sql("SHOW TABLES");
+            tables = db.sql_fetchall()
+            if tables:
+                # Database should be empty. We don't dare to delete any data
+                raise DatabaseError, "(Database %s contains tables)"%config.POSTGRESQL_DBNAME
+            db.sql("DROP DATABASE %s" % config.POSTGRESQL_DBNAME)
+            db.sql("CREATE DATABASE %s" % config.POSTGRESQL_DBNAME)
+            db.close()
+        except (MySQLdb.ProgrammingError, DatabaseError), msg:
+            print "Warning! Postgresql tests will not be performed", msg
+            print "See doc/postgresql.txt for more details."
+        else:
+            p.append('postgresql')
+            l.append(unittest.makeSuite(postgresqlDBTestCase, 'test'))
+            l.append(unittest.makeSuite(postgresqlReadOnlyDBTestCase, 'test'))
     #return unittest.TestSuite(l)
 
     if hasattr(backends, 'sqlite'):
-- 
2.30.2
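
Both doc/mysql.txt and the test config above describe MYSQL_DATABASE as a
(host, user, password, database) tuple. A minimal sketch of an equivalent
MySQLdb connection built from that layout, assuming the example values from
the documentation; how the backend itself consumes the tuple is not shown
here::

    # Open a connection from the 4-tuple layout used by MYSQL_DATABASE.
    # Sketch only - substitute your own connection values.
    import MySQLdb

    MYSQL_DATABASE = ('localhost', 'rounduptest', 'rounduptest', 'rounduptest')

    host, user, password, database = MYSQL_DATABASE
    conn = MySQLdb.connect(host=host, user=user, passwd=password, db=database)
    cursor = conn.cursor()
    cursor.execute("SELECT VERSION()")
    print "connected to MySQL", cursor.fetchone()[0]
    conn.close()
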