index 6071940a540e376e10016dcae4b6980eb74bbd1b..7c95841eeca0f54bd23b84f0d19f7d6ca65456be 100644 (file)
# BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
#
-#$Id: back_anydbm.py,v 1.21 2002-01-02 02:31:38 richard Exp $
+#$Id: back_anydbm.py,v 1.29 2002-02-25 14:34:31 grubert Exp $
'''
This module defines a backend that saves the hyperdatabase in a database
chosen by anydbm. It is guaranteed to always be available in python
import whichdb, anydbm, os, marshal
from roundup import hyperdb, date, password
-
-DEBUG=os.environ.get('HYPERDBDEBUG', '')
+from blobfiles import FileStorage
#
# Now the database
#
-class Database(hyperdb.Database):
+class Database(FileStorage, hyperdb.Database):
"""A database for storing records containing flexible data types.
Transaction stuff TODO:
. perhaps detect write collisions (related to above)?
"""
- def __init__(self, storagelocator, journaltag=None):
+ def __init__(self, config, journaltag=None):
"""Open a hyperdatabase given a specifier to some storage.
+ The 'storagelocator' is obtained from config.DATABASE.
The meaning of 'storagelocator' depends on the particular
implementation of the hyperdatabase. It could be a file name,
a directory path, a socket descriptor for a connection to a
None, the database is opened in read-only mode: the Class.create(),
Class.set(), and Class.retire() methods are disabled.
"""
- self.dir, self.journaltag = storagelocator, journaltag
+ self.config, self.journaltag = config, journaltag
+ self.dir = config.DATABASE
self.classes = {}
self.cache = {} # cache of nodes loaded or created
self.dirtynodes = {} # keep track of the dirty nodes by class
def __getattr__(self, classname):
"""A convenient way of calling self.getclass(classname)."""
if self.classes.has_key(classname):
- if DEBUG:
+ if hyperdb.DEBUG:
print '__getattr__', (self, classname)
return self.classes[classname]
raise AttributeError, classname
def addclass(self, cl):
- if DEBUG:
+ if hyperdb.DEBUG:
print 'addclass', (self, cl)
cn = cl.classname
if self.classes.has_key(cn):
def getclasses(self):
"""Return a list of the names of all existing classes."""
- if DEBUG:
+ if hyperdb.DEBUG:
print 'getclasses', (self,)
l = self.classes.keys()
l.sort()
If 'classname' is not a valid class name, a KeyError is raised.
"""
- if DEBUG:
+ if hyperdb.DEBUG:
print 'getclass', (self, classname)
return self.classes[classname]
def clear(self):
'''Delete all database contents
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'clear', (self,)
for cn in self.classes.keys():
for type in 'nodes', 'journals':
''' grab a connection to the class db that will be used for
multiple actions
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'getclassdb', (self, classname, mode)
return self._opendb('nodes.%s'%classname, mode)
'''Low-level database opener that gets around anydbm/dbm
eccentricities.
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print '_opendb', (self, name, mode)
# determine which DB wrote the class file
db_type = ''
# new database? let anydbm pick the best dbm
if not db_type:
- if DEBUG:
+ if hyperdb.DEBUG:
print "_opendb anydbm.open(%r, 'n')"%path
return anydbm.open(path, 'n')
raise hyperdb.DatabaseError, \
"Couldn't open database - the required module '%s'"\
"is not available"%db_type
- if DEBUG:
+ if hyperdb.DEBUG:
print "_opendb %r.open(%r, %r)"%(db_type, path, mode)
return dbm.open(path, mode)
def addnode(self, classname, nodeid, node):
''' add the specified node to its class's db
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'addnode', (self, classname, nodeid, node)
self.newnodes.setdefault(classname, {})[nodeid] = 1
self.cache.setdefault(classname, {})[nodeid] = node
def setnode(self, classname, nodeid, node):
''' change the specified node
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'setnode', (self, classname, nodeid, node)
self.dirtynodes.setdefault(classname, {})[nodeid] = 1
# can't set without having already loaded the node
def savenode(self, classname, nodeid, node):
''' perform the saving of data specified by the set/addnode
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'savenode', (self, classname, nodeid, node)
self.transactions.append((self._doSaveNode, (classname, nodeid, node)))
def getnode(self, classname, nodeid, db=None, cache=1):
''' get a node from the database
'''
- if DEBUG:
- print 'getnode', (self, classname, nodeid, cldb)
+ if hyperdb.DEBUG:
+ print 'getnode', (self, classname, nodeid, db)
if cache:
# try the cache
cache = self.cache.setdefault(classname, {})
def hasnode(self, classname, nodeid, db=None):
''' determine if the database has a given node
'''
- if DEBUG:
- print 'hasnode', (self, classname, nodeid, cldb)
+ if hyperdb.DEBUG:
+ print 'hasnode', (self, classname, nodeid, db)
# try the cache
cache = self.cache.setdefault(classname, {})
if cache.has_key(nodeid):
return res
def countnodes(self, classname, db=None):
- if DEBUG:
- print 'countnodes', (self, classname, cldb)
+ if hyperdb.DEBUG:
+ print 'countnodes', (self, classname, db)
# include the new nodes not saved to the DB yet
count = len(self.newnodes.get(classname, {}))
return count
def getnodeids(self, classname, db=None):
- if DEBUG:
+ if hyperdb.DEBUG:
print 'getnodeids', (self, classname, db)
# start off with the new nodes
res = self.newnodes.get(classname, {}).keys()
#
# Files - special node properties
- #
- def filename(self, classname, nodeid, property=None):
- '''Determine what the filename for the given node and optionally property is.
- '''
- # TODO: split into multiple files directories
- if property:
- return os.path.join(self.dir, 'files', '%s%s.%s'%(classname,
- nodeid, property))
- else:
- # roundupdb.FileClass never specified the property name, so don't include it
- return os.path.join(self.dir, 'files', '%s%s'%(classname,
- nodeid))
-
- def storefile(self, classname, nodeid, property, content):
- '''Store the content of the file in the database. The property may be None, in
- which case the filename does not indicate which property is being saved.
- '''
- name = self.filename(classname, nodeid, property)
- open(name + '.tmp', 'wb').write(content)
- self.transactions.append((self._doStoreFile, (name, )))
-
- def getfile(self, classname, nodeid, property):
- '''Store the content of the file in the database.
- '''
- filename = self.filename(classname, nodeid, property)
- try:
- return open(filename, 'rb').read()
- except:
- return open(filename+'.tmp', 'rb').read()
-
+ # inherited from FileStorage
#
# Journal
'link' or 'unlink' -- 'params' is (classname, nodeid, propname)
'retire' -- 'params' is None
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'addjournal', (self, classname, nodeid, action, params)
self.transactions.append((self._doSaveJournal, (classname, nodeid,
action, params)))
def getjournal(self, classname, nodeid):
''' get the journal for id
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'getjournal', (self, classname, nodeid)
# attempt to open the journal - in some rare cases, the journal may
# not exist
journal = marshal.loads(db[nodeid])
res = []
for entry in journal:
- (nodeid, date_stamp, self.journaltag, action, params) = entry
+ (nodeid, date_stamp, user, action, params) = entry
date_obj = date.Date(date_stamp)
- res.append((nodeid, date_obj, self.journaltag, action, params))
+ res.append((nodeid, date_obj, user, action, params))
return res
+ def pack(self, pack_before):
+ ''' delete all journal entries before 'pack_before' '''
+ if hyperdb.DEBUG:
+ print 'packjournal', (self, pack_before)
+
+ pack_before = pack_before.get_tuple()
+
+ classes = self.getclasses()
+
+ # TODO: factor this out to method - we're already doing it in
+ # _opendb.
+ db_type = ''
+ path = os.path.join(os.getcwd(), self.dir, classes[0])
+ if os.path.exists(path):
+ db_type = whichdb.whichdb(path)
+ if not db_type:
+ raise hyperdb.DatabaseError, "Couldn't identify database type"
+ elif os.path.exists(path+'.db'):
+ db_type = 'dbm'
+
+ for classname in classes:
+ db_name = 'journals.%s'%classname
+ db = self._opendb(db_name, 'w')
+
+ for key in db.keys():
+ journal = marshal.loads(db[key])
+ l = []
+ last_set_entry = None
+ for entry in journal:
+ (nodeid, date_stamp, self.journaltag, action,
+ params) = entry
+ if date_stamp > pack_before or action == 'create':
+ l.append(entry)
+ elif action == 'set':
+ # grab the last set entry to keep information on
+ # activity
+ last_set_entry = entry
+ if last_set_entry:
+ date_stamp = last_set_entry[1]
+ # if the last set entry was made after the pack date
+ # then it is already in the list
+ if date_stamp < pack_before:
+ l.append(last_set_entry)
+ db[key] = marshal.dumps(l)
+ if db_type == 'gdbm':
+ db.reorganize()
+ db.close()
+
#
# Basic transaction support
def commit(self):
''' Commit the current transactions.
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'commit', (self,)
# TODO: lock the DB
self.transactions = []
def _doSaveNode(self, classname, nodeid, node):
- if DEBUG:
+ if hyperdb.DEBUG:
print '_doSaveNode', (self, classname, nodeid, node)
# get the database handle
db[nodeid] = marshal.dumps(node)
def _doSaveJournal(self, classname, nodeid, action, params):
- if DEBUG:
- print '_doSaveJournal', (self, classname, nodeid, action, params)
entry = (nodeid, date.Date().get_tuple(), self.journaltag, action,
params)
+ if hyperdb.DEBUG:
+ print '_doSaveJournal', entry
# get the database handle
db_name = 'journals.%s'%classname
def rollback(self):
''' Reverse all actions from the current transaction.
'''
- if DEBUG:
+ if hyperdb.DEBUG:
print 'rollback', (self, )
for method, args in self.transactions:
# delete temporary files
if method == self._doStoreFile:
- os.remove(args[0]+".tmp")
+ if os.path.exists(args[0]+".tmp"):
+ os.remove(args[0]+".tmp")
self.cache = {}
self.dirtynodes = {}
self.newnodes = {}
#
#$Log: not supported by cvs2svn $
+#Revision 1.28 2002/02/16 09:14:17 richard
+# . #514854 ] History: "User" is always ticket creator
+#
+#Revision 1.27 2002/01/22 07:21:13 richard
+#. fixed back_bsddb so it passed the journal tests
+#
+#... it didn't seem happy using the back_anydbm _open method, which is odd.
+#Yet another occurrence of whichdb not being able to recognise older bsddb
+#databases. Yadda yadda. Made the HYPERDBDEBUG stuff more sane in the
+#process.
+#
+#Revision 1.26 2002/01/22 05:18:38 rochecompaan
+#last_set_entry was referenced before assignment
+#
+#Revision 1.25 2002/01/22 05:06:08 rochecompaan
+#We need to keep the last 'set' entry in the journal to preserve
+#information on 'activity' for nodes.
+#
+#Revision 1.24 2002/01/21 16:33:20 rochecompaan
+#You can now use the roundup-admin tool to pack the database
+#
+#Revision 1.23 2002/01/18 04:32:04 richard
+#Rollback was breaking because a message hadn't actually been written to the file. Needs
+#more investigation.
+#
+#Revision 1.22 2002/01/14 02:20:15 richard
+# . changed all config accesses so they access either the instance or the
+# config attriubute on the db. This means that all config is obtained from
+# instance_config instead of the mish-mash of classes. This will make
+# switching to a ConfigParser setup easier too, I hope.
+#
+#At a minimum, this makes migration a _little_ easier (a lot easier in the
+#0.5.0 switch, I hope!)
+#
+#Revision 1.21 2002/01/02 02:31:38 richard
+#Sorry for the huge checkin message - I was only intending to implement #496356
+#but I found a number of places where things had been broken by transactions:
+# . modified ROUNDUPDBSENDMAILDEBUG to be SENDMAILDEBUG and hold a filename
+# for _all_ roundup-generated smtp messages to be sent to.
+# . the transaction cache had broken the roundupdb.Class set() reactors
+# . newly-created author users in the mailgw weren't being committed to the db
+#
+#Stuff that made it into CHANGES.txt (ie. the stuff I was actually working
+#on when I found that stuff :):
+# . #496356 ] Use threading in messages
+# . detectors were being registered multiple times
+# . added tests for mailgw
+# . much better attaching of erroneous messages in the mail gateway
+#
#Revision 1.20 2001/12/18 15:30:34 rochecompaan
#Fixed bugs:
# . Fixed file creation and retrieval in same transaction in anydbm