summary | shortlog | log | commit | commitdiff | tree
raw | patch | inline | side by side (parent: 395ea55)
author | richard <richard@57a73879-2fb5-44c3-a270-3262357dd7e2> | |
Thu, 12 Dec 2002 09:31:04 +0000 (09:31 +0000) | ||
committer | richard <richard@57a73879-2fb5-44c3-a270-3262357dd7e2> | |
Thu, 12 Dec 2002 09:31:04 +0000 (09:31 +0000) |
git-svn-id: http://svn.roundup-tracker.org/svnroot/roundup/trunk@1408 57a73879-2fb5-44c3-a270-3262357dd7e2
diff --git a/CHANGES.txt b/CHANGES.txt
index 4e63d6db1bced986108a028e6d1ccb43604752f4..a949424eb0f902fff3b4cc6842db0e6dc9a1e20a 100644 (file)
--- a/CHANGES.txt
+++ b/CHANGES.txt
2003-01-?? 0.5.4
- key the templates cache off full path, not filename
+- implemented whole-database locking
2002-12-11 0.5.3
index 61606aeba579fc5761e5d768b319a4059c51b2cd..a199d1eb7629ba542998b22a6d5d7b2c9b824149 100644 (file)
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-#$Id: back_anydbm.py,v 1.94 2002-12-11 01:03:38 richard Exp $
+#$Id: back_anydbm.py,v 1.95 2002-12-12 09:31:04 richard Exp $
'''
This module defines a backend that saves the hyperdatabase in a database
chosen by anydbm. It is guaranteed to always be available in python
from blobfiles import FileStorage
from sessions import Sessions
from roundup.indexer import Indexer
-from locking import acquire_lock, release_lock
+from roundup.backends import locking
from roundup.hyperdb import String, Password, Date, Interval, Link, \
Multilink, DatabaseError, Boolean, Number
# ensure files are group readable and writable
os.umask(0002)
+ # lock it
+ lockfilenm = os.path.join(self.dir, 'lock')
+ self.lockfile = locking.acquire_lock(lockfilenm)
+ self.lockfile.write(str(os.getpid()))
+ self.lockfile.flush()
+
def post_init(self):
''' Called once the schema initialisation has finished.
'''
mode)
return dbm.open(path, mode)
- def lockdb(self, name):
- ''' Lock a database file
- '''
- path = os.path.join(os.getcwd(), self.dir, '%s.lock'%name)
- return acquire_lock(path)
-
#
# Node IDs
#
''' Generate a new id for the given class
'''
# open the ids DB - create if if doesn't exist
- lock = self.lockdb('_ids')
db = self.opendb('_ids', 'c')
if db.has_key(classname):
newid = db[classname] = str(int(db[classname]) + 1)
newid = str(self.getclass(classname).count()+1)
db[classname] = newid
db.close()
- release_lock(lock)
return newid
def setid(self, classname, setid):
''' Set the id counter: used during import of database
'''
# open the ids DB - create if if doesn't exist
- lock = self.lockdb('_ids')
db = self.opendb('_ids', 'c')
db[classname] = str(setid)
db.close()
- release_lock(lock)
#
# Nodes
def close(self):
''' Nothing to do
'''
- pass
+ if self.lockfile is not None:
+ locking.release_lock(self.lockfile)
+ if self.lockfile is not None:
+ self.lockfile.close()
+ self.lockfile = None
_marker = []
class Class(hyperdb.Class):
def destroy(self, nodeid):
'''Destroy a node.
-
+
WARNING: this method should never be used except in extremely rare
situations where there could never be links to the node being
deleted
def get(self, nodeid, propname, default=_marker, cache=1):
''' trap the content propname and get it from the file
'''
-
- poss_msg = 'Possibly a access right configuration problem.'
+ poss_msg = 'Possibly an access right configuration problem.'
if propname == 'content':
try:
return self.db.getfile(self.classname, nodeid, None)
except IOError, (strerror):
- # BUG: by catching this we donot see an error in the log.
+ # XXX by catching this we donot see an error in the log.
return 'ERROR reading file: %s%s\n%s\n%s'%(
self.classname, nodeid, poss_msg, strerror)
if default is not _marker:
index 62dfee21a6551ce413413d7fd4e5be86bd8795f2..1d25b5e90c90a3cec8557be4c8e7fcac5ec13518 100644 (file)
-# $Id: back_gadfly.py,v 1.29 2002-10-07 00:52:51 richard Exp $
+# $Id: back_gadfly.py,v 1.30 2002-12-12 09:31:04 richard Exp $
''' Gadlfy relational database hypderb backend.
About Gadfly
from roundup import hyperdb, date, password, roundupdb, security
from roundup.hyperdb import String, Password, Date, Interval, Link, \
Multilink, DatabaseError, Boolean, Number
+from roundup.backends import locking
# basic RDBMS backen implementation
from roundup.backends import rdbms_common
def open_connection(self):
db = getattr(self.config, 'GADFLY_DATABASE', ('database', self.dir))
+
+ # lock it
+ lockfilenm = os.path.join(db[1], db[0]) + '.lck'
+ self.lockfile = locking.acquire_lock(lockfilenm)
+ self.lockfile.write(str(os.getpid()))
+ self.lockfile.flush()
+
if len(db) == 2:
# ensure files are group readable and writable
os.umask(0002)
index c8e220d8cfdbbafe927eb65854264e25580665ad..6f940f79303226f2021315f84263037b48c22dae 100755 (executable)
for nodeid in klass.list():
klass.index(nodeid)
self.indexer.save_index()
-
-
+
# --- defined in ping's spec
def __getattr__(self, classname):
if classname == 'curuserid':
'activity' : hyperdb.Date(),
'creation' : hyperdb.Date(),
'creator' : hyperdb.Link('user') }
- self.auditors = {'create': [], 'set': [], 'retire': []} # event -> list of callables
- self.reactors = {'create': [], 'set': [], 'retire': []} # ditto
+
+ # event -> list of callables
+ self.auditors = {'create': [], 'set': [], 'retire': []}
+ self.reactors = {'create': [], 'set': [], 'retire': []}
+
view = self.__getview()
self.maxid = 1
if view:
self.maxid = view[-1].id + 1
self.uncommitted = {}
self.rbactions = []
+
# people reach inside!!
self.properties = self.ruprops
self.db.addclass(self)
l = self.reactors[event]
if detector not in l:
self.reactors[event].append(detector)
+
# --- the hyperdb.Class methods
def create(self, **propvalues):
self.fireAuditors('create', None, propvalues)
if self.db.journaltag is None:
raise hyperdb.DatabaseError, 'Database open read-only'
view = self.getview(1)
+
# node must exist & not be retired
id = int(nodeid)
ndx = view.find(id=id)
setattr(row, key, v)
changes[key] = oldvalue
propvalues[key] = value
-
+
elif isinstance(prop, hyperdb.Boolean):
if value is None:
bv = 0
iv.delete(ndx)
self.db.dirty = 1
self.fireReactors('retire', nodeid, None)
+
def history(self, nodeid):
if not self.do_journal:
raise ValueError, 'Journalling is disabled for this class'
return self.db.getjournal(self.classname, nodeid)
+
def setkey(self, propname):
if self.keyname:
if propname == self.keyname:
return
- raise ValueError, "%s already indexed on %s" % (self.classname, self.keyname)
+ raise ValueError, "%s already indexed on %s"%(self.classname,
+ self.keyname)
prop = self.properties.get(propname, None)
if prop is None:
prop = self.privateprops.get(propname, None)
raise KeyError, "no property %s" % propname
if not isinstance(prop, hyperdb.String):
raise TypeError, "%s is not a String" % propname
+
# first setkey for this run
self.keyname = propname
iv = self.db._db.view('_%s' % self.classname)
if self.db.fastopen and iv.structure():
return
+
# very first setkey ever
self.db.dirty = 1
iv = self.db._db.getas('_%s[k:S,i:I]' % self.classname)
iv = iv.ordered(1)
-# print "setkey building index"
for row in self.getview():
iv.append(k=getattr(row, propname), i=row.id)
self.db.commit()
+
def getkey(self):
return self.keyname
+
def lookup(self, keyvalue):
if type(keyvalue) is not _STRINGTYPE:
raise TypeError, "%r is not a string" % keyvalue
for row in self.getview().select(_isdel=0):
l.append(str(row.id))
return l
+
def count(self):
return len(self.getview())
+
def getprops(self, protected=1):
# protected is not in ping's spec
allprops = self.ruprops.copy()
if protected and self.privateprops is not None:
allprops.update(self.privateprops)
return allprops
+
def addprop(self, **properties):
for key in properties.keys():
if self.ruprops.has_key(key):
- raise ValueError, "%s is already a property of %s" % (key, self.classname)
+ raise ValueError, "%s is already a property of %s"%(key,
+ self.classname)
self.ruprops.update(properties)
self.db.fastopen = 0
view = self.__getview()
self.db.commit()
# ---- end of ping's spec
+
def filter(self, search_matches, filterspec, sort=(None,None),
group=(None,None)):
# search_matches is None or a set (dict of {nodeid: {propname:[nodeid,...]}})
# filterspec is a dict {propname:value}
# sort and group are (dir, prop) where dir is '+', '-' or None
# and prop is a prop name or None
-
where = {'_isdel':0}
mlcriteria = {}
regexes = {}
if where:
v = v.select(where)
#print "filter where at %s" % time.time()
-
+
if mlcriteria:
- # multilink - if any of the nodeids required by the
- # filterspec aren't in this node's property, then skip
- # it
+ # multilink - if any of the nodeids required by the
+ # filterspec aren't in this node's property, then skip it
def ff(row, ml=mlcriteria):
for propname, values in ml.items():
sv = getattr(row, propname)
props = props.keys()
props.sort()
return props[0]
+
def stringFind(self, **requirements):
"""Locate a particular node by matching a set of its String
properties in a caseless search.
view.append(d)
creator = d.get('creator', None)
creation = d.get('creation', None)
- self.db.addjournal(self.classname, newid, 'create', {}, creator, creation)
+ self.db.addjournal(self.classname, newid, 'create', {}, creator,
+ creation)
return newid
# --- used by Database
hyperdb.Number : 'I',
}
class FileClass(Class):
- ' like Class but with a content property '
+ ''' like Class but with a content property
+ '''
default_mime_type = 'text/plain'
def __init__(self, db, classname, **properties):
properties['content'] = FileName()
if not properties.has_key('type'):
properties['type'] = hyperdb.String()
Class.__init__(self, db, classname, **properties)
+
def get(self, nodeid, propname, default=_marker, cache=1):
x = Class.get(self, nodeid, propname, default, cache)
if propname == 'content':
except Exception, e:
x = repr(e)
return x
+
def create(self, **propvalues):
content = propvalues['content']
del propvalues['content']
open(nm, 'wb').write(content)
self.set(newid, content = 'file:'+nm)
mimetype = propvalues.get('type', self.default_mime_type)
- self.db.indexer.add_text((self.classname, newid, 'content'), content, mimetype)
+ self.db.indexer.add_text((self.classname, newid, 'content'), content,
+ mimetype)
def undo(fnm=nm, action1=os.remove, indexer=self.db.indexer):
action1(fnm)
self.rollbackaction(undo)
return newid
+
def index(self, nodeid):
Class.index(self, nodeid)
mimetype = self.get(nodeid, 'type')
self.get(nodeid, 'content'), mimetype)
class IssueClass(Class, roundupdb.IssueClass):
- # Overridden methods:
- def __init__(self, db, classname, **properties):
- """The newly-created class automatically includes the "messages",
+ ''' The newly-created class automatically includes the "messages",
"files", "nosy", and "superseder" properties. If the 'properties'
dictionary attempts to specify any of these properties or a
- "creation" or "activity" property, a ValueError is raised."""
+ "creation" or "activity" property, a ValueError is raised.
+ '''
+ def __init__(self, db, classname, **properties):
if not properties.has_key('title'):
properties['title'] = hyperdb.String(indexme='yes')
if not properties.has_key('messages'):
self.reindex = 1
self.changed = 0
self.propcache = {}
+
def force_reindex(self):
v = self.db.view('ids')
v[:] = []
v[:] = []
self.db.commit()
self.reindex = 1
+
def should_reindex(self):
return self.reindex
+
def _getprops(self, classname):
props = self.propcache.get(classname, None)
if props is None:
props = [prop.name for prop in props]
self.propcache[classname] = props
return props
+
def _getpropid(self, classname, propname):
return self._getprops(classname).index(propname)
+
def _getpropname(self, classname, propid):
return self._getprops(classname)[propid]
property = self._getpropname(classname, hit.propid)
rslt[i] = (classname, nodeid, property)
return rslt
+
def save_index(self):
if self.changed:
self.db.commit()
self.changed = 0
+
def rollback(self):
if self.changed:
self.db.rollback()
self.db = metakit.storage(self.path, 1)
self.changed = 0
+
index 53be129b1da31f9c80ff396e17cc5f1f5c21be68..69d258523dbd1e335b435b710c0775b18bce85b2 100644 (file)
-# $Id: back_sqlite.py,v 1.7 2002-10-08 04:11:16 richard Exp $
+# $Id: back_sqlite.py,v 1.8 2002-12-12 09:31:04 richard Exp $
__doc__ = '''
See https://pysqlite.sourceforge.net/ for pysqlite info
'''
import base64, marshal
from roundup.backends.rdbms_common import *
+from roundup.backends import locking
import sqlite
class Database(Database):
# ensure files are group readable and writable
os.umask(0002)
db = os.path.join(self.config.DATABASE, 'db')
+
+ # lock it
+ lockfilenm = db[:-3] + 'lck'
+ self.lockfile = locking.acquire_lock(lockfilenm)
+ self.lockfile.write(str(os.getpid()))
+ self.lockfile.flush()
+
self.conn = sqlite.connect(db=db)
self.cursor = self.conn.cursor()
try:
if str(value) != 'close failed - Connection is closed.':
raise
+ # release the lock too
+ if self.lockfile is not None:
+ locking.release_lock(self.lockfile)
+ if self.lockfile is not None:
+ self.lockfile.close()
+ self.lockfile = None
def rollback(self):
''' Reverse all actions from the current transaction.
index cfe27b3563d8c467fad34dfbb00bed83a237c496..b0c466cb43bb343fccc2dabf70146fe43fc36fd0 100644 (file)
-# $Id: rdbms_common.py,v 1.24 2002-11-06 11:38:42 richard Exp $
+# $Id: rdbms_common.py,v 1.25 2002-12-12 09:31:04 richard Exp $
''' Relational database (SQL) backend common code.
Basics:
from roundup import hyperdb, date, password, roundupdb, security
from roundup.hyperdb import String, Password, Date, Interval, Link, \
Multilink, DatabaseError, Boolean, Number
+from roundup.backends import locking
# support
from blobfiles import FileStorage
self.cache = {}
self.cache_lru = []
+ # database lock
+ self.lockfile = None
+
# open a connection to the database, creating the "conn" attribute
self.open_connection()
''' Close off the connection.
'''
self.conn.close()
+ if self.lockfile is not None:
+ locking.release_lock(self.lockfile)
+ if self.lockfile is not None:
+ self.lockfile.close()
+ self.lockfile = None
#
# The base Class class
diff --git a/test/test_db.py b/test/test_db.py
index 939f3d245402b48978c3633411dc8f49f29b7f46..f6077ccc6954214a9827677c54f1484f8ee3900c 100644 (file)
--- a/test/test_db.py
+++ b/test/test_db.py
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-# $Id: test_db.py,v 1.62 2002-11-06 11:45:25 richard Exp $
+# $Id: test_db.py,v 1.63 2002-12-12 09:31:04 richard Exp $
import unittest, os, shutil, time
class MyTestCase(unittest.TestCase):
def tearDown(self):
self.db.close()
- if hasattr(self, 'db2'):
- self.db2.close()
if os.path.exists('_test_dir'):
shutil.rmtree('_test_dir')
os.makedirs(config.DATABASE + '/files')
self.db = anydbm.Database(config, 'admin')
setupSchema(self.db, 1, anydbm)
- self.db2 = anydbm.Database(config, 'admin')
- setupSchema(self.db2, 0, anydbm)
+
+ def testIDGeneration(self):
+ id1 = self.db.issue.create(title="spam", status='1')
+ id2 = self.db.issue.create(title="eggs", status='2')
+ self.assertNotEqual(id1, id2)
def testStringChange(self):
for commit in (0,1):
# we should have the create and last set entries now
self.assertEqual(jlen-1, len(self.db.getjournal('issue', id)))
- def testIDGeneration(self):
- id1 = self.db.issue.create(title="spam", status='1')
- id2 = self.db2.issue.create(title="eggs", status='2')
- self.assertNotEqual(id1, id2)
-
def testSearching(self):
self.db.file.create(content='hello', type="text/plain")
self.db.file.create(content='world', type="text/frozz",
os.makedirs(config.DATABASE + '/files')
db = anydbm.Database(config, 'admin')
setupSchema(db, 1, anydbm)
+ db.close()
self.db = anydbm.Database(config)
setupSchema(self.db, 0, anydbm)
- self.db2 = anydbm.Database(config, 'admin')
- setupSchema(self.db2, 0, anydbm)
def testExceptions(self):
# this tests the exceptions that should be raised
os.makedirs(config.DATABASE + '/files')
self.db = bsddb.Database(config, 'admin')
setupSchema(self.db, 1, bsddb)
- self.db2 = bsddb.Database(config, 'admin')
- setupSchema(self.db2, 0, bsddb)
class bsddbReadOnlyDBTestCase(anydbmReadOnlyDBTestCase):
def setUp(self):
os.makedirs(config.DATABASE + '/files')
db = bsddb.Database(config, 'admin')
setupSchema(db, 1, bsddb)
+ db.close()
self.db = bsddb.Database(config)
setupSchema(self.db, 0, bsddb)
- self.db2 = bsddb.Database(config, 'admin')
- setupSchema(self.db2, 0, bsddb)
class bsddb3DBTestCase(anydbmDBTestCase):
os.makedirs(config.DATABASE + '/files')
self.db = bsddb3.Database(config, 'admin')
setupSchema(self.db, 1, bsddb3)
- self.db2 = bsddb3.Database(config, 'admin')
- setupSchema(self.db2, 0, bsddb3)
class bsddb3ReadOnlyDBTestCase(anydbmReadOnlyDBTestCase):
def setUp(self):
os.makedirs(config.DATABASE + '/files')
db = bsddb3.Database(config, 'admin')
setupSchema(db, 1, bsddb3)
+ db.close()
self.db = bsddb3.Database(config)
setupSchema(self.db, 0, bsddb3)
- self.db2 = bsddb3.Database(config, 'admin')
- setupSchema(self.db2, 0, bsddb3)
class gadflyDBTestCase(anydbmDBTestCase):
self.db = gadfly.Database(config, 'admin')
setupSchema(self.db, 1, gadfly)
- def testIDGeneration(self):
- id1 = self.db.issue.create(title="spam", status='1')
- id2 = self.db.issue.create(title="eggs", status='2')
- self.assertNotEqual(id1, id2)
-
def testFilteringString(self):
ae, filt = self.filteringSetup()
ae(filt(None, {'title': 'issue one'}, ('+','id'), (None,None)), ['1'])
os.makedirs(config.DATABASE + '/files')
db = gadfly.Database(config, 'admin')
setupSchema(db, 1, gadfly)
+ db.close()
self.db = gadfly.Database(config)
setupSchema(self.db, 0, gadfly)
os.makedirs(config.DATABASE + '/files')
db = sqlite.Database(config, 'admin')
setupSchema(db, 1, sqlite)
+ db.close()
self.db = sqlite.Database(config)
setupSchema(self.db, 0, sqlite)
self.db = metakit.Database(config, 'admin')
setupSchema(self.db, 1, metakit)
- def testIDGeneration(self):
- id1 = self.db.issue.create(title="spam", status='1')
- id2 = self.db.issue.create(title="eggs", status='2')
- self.assertNotEqual(id1, id2)
-
def testTransactions(self):
# remember the number of items we started
num_issues = len(self.db.issue.list())
os.makedirs(config.DATABASE + '/files')
db = metakit.Database(config, 'admin')
setupSchema(db, 1, metakit)
+ db.close()
self.db = metakit.Database(config)
setupSchema(self.db, 0, metakit)