46f1df6d74e6fc075a89cbebcf144a99f64f3715
1 #
2 # Copyright (c) 2001 Bizar Software Pty Ltd (http://www.bizarsoftware.com.au/)
3 # This module is free software, and you may redistribute it and/or modify
4 # under the same terms as Python, so long as this copyright message and
5 # disclaimer are retained in their original form.
6 #
7 # IN NO EVENT SHALL BIZAR SOFTWARE PTY LTD BE LIABLE TO ANY PARTY FOR
8 # DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING
9 # OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE
10 # POSSIBILITY OF SUCH DAMAGE.
11 #
12 # BIZAR SOFTWARE PTY LTD SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
13 # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
14 # FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS"
15 # BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
16 # SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
17 #
18 #$Id: back_anydbm.py,v 1.37 2002-06-20 23:52:35 richard Exp $
19 '''
20 This module defines a backend that saves the hyperdatabase in a database
21 chosen by anydbm. It is guaranteed to always be available in python
22 versions >2.1.1 (the dumbdbm fallback in 2.1.1 and earlier has several
23 serious bugs, and is not available)
24 '''
26 import whichdb, anydbm, os, marshal
27 from roundup import hyperdb, date
28 from blobfiles import FileStorage
29 from roundup.roundup_indexer import RoundupIndexer
30 from locking import acquire_lock, release_lock
32 #
33 # Now the database
34 #
class Database(FileStorage, hyperdb.Database):
    """A database for storing records containing flexible data types.

    Transaction stuff TODO:
        . check the timestamp of the class file and nuke the cache if it's
          modified. Do some sort of conflict checking on the dirty stuff.
        . perhaps detect write collisions (related to above)?
    """
    def __init__(self, config, journaltag=None):
        """Open a hyperdatabase given a specifier to some storage.

        The 'storagelocator' is obtained from config.DATABASE.
        The meaning of 'storagelocator' depends on the particular
        implementation of the hyperdatabase. It could be a file name,
        a directory path, a socket descriptor for a connection to a
        database over the network, etc.

        The 'journaltag' is a token that will be attached to the journal
        entries for any edits done on the database. If 'journaltag' is
        None, the database is opened in read-only mode: the Class.create(),
        Class.set(), and Class.retire() methods are disabled.
        """
        self.config, self.journaltag = config, journaltag
        # directory that holds all the dbm files for this tracker
        self.dir = config.DATABASE
        # maps classname -> Class object registered via addclass()
        self.classes = {}
        self.cache = {}         # cache of nodes loaded or created
        self.dirtynodes = {}    # keep track of the dirty nodes by class
        self.newnodes = {}      # keep track of the new nodes by class
        # pending (method, args) pairs, applied by commit()
        self.transactions = []
        self.indexer = RoundupIndexer(self.dir)
        # ensure files are group readable and writable
        os.umask(0002)
69 def __repr__(self):
70 return '<back_anydbm instance at %x>'%id(self)
72 #
73 # Classes
74 #
75 def __getattr__(self, classname):
76 """A convenient way of calling self.getclass(classname)."""
77 if self.classes.has_key(classname):
78 if __debug__:
79 print >>hyperdb.DEBUG, '__getattr__', (self, classname)
80 return self.classes[classname]
81 raise AttributeError, classname
83 def addclass(self, cl):
84 if __debug__:
85 print >>hyperdb.DEBUG, 'addclass', (self, cl)
86 cn = cl.classname
87 if self.classes.has_key(cn):
88 raise ValueError, cn
89 self.classes[cn] = cl
91 def getclasses(self):
92 """Return a list of the names of all existing classes."""
93 if __debug__:
94 print >>hyperdb.DEBUG, 'getclasses', (self,)
95 l = self.classes.keys()
96 l.sort()
97 return l
99 def getclass(self, classname):
100 """Get the Class object representing a particular class.
102 If 'classname' is not a valid class name, a KeyError is raised.
103 """
104 if __debug__:
105 print >>hyperdb.DEBUG, 'getclass', (self, classname)
106 return self.classes[classname]
108 #
109 # Class DBs
110 #
111 def clear(self):
112 '''Delete all database contents
113 '''
114 if __debug__:
115 print >>hyperdb.DEBUG, 'clear', (self,)
116 for cn in self.classes.keys():
117 for dummy in 'nodes', 'journals':
118 path = os.path.join(self.dir, 'journals.%s'%cn)
119 if os.path.exists(path):
120 os.remove(path)
121 elif os.path.exists(path+'.db'): # dbm appends .db
122 os.remove(path+'.db')
124 def getclassdb(self, classname, mode='r'):
125 ''' grab a connection to the class db that will be used for
126 multiple actions
127 '''
128 if __debug__:
129 print >>hyperdb.DEBUG, 'getclassdb', (self, classname, mode)
130 return self._opendb('nodes.%s'%classname, mode)
132 def _opendb(self, name, mode):
133 '''Low-level database opener that gets around anydbm/dbm
134 eccentricities.
135 '''
136 if __debug__:
137 print >>hyperdb.DEBUG, '_opendb', (self, name, mode)
139 # determine which DB wrote the class file
140 db_type = ''
141 path = os.path.join(os.getcwd(), self.dir, name)
142 if os.path.exists(path):
143 db_type = whichdb.whichdb(path)
144 if not db_type:
145 raise hyperdb.DatabaseError, "Couldn't identify database type"
146 elif os.path.exists(path+'.db'):
147 # if the path ends in '.db', it's a dbm database, whether
148 # anydbm says it's dbhash or not!
149 db_type = 'dbm'
151 # new database? let anydbm pick the best dbm
152 if not db_type:
153 if __debug__:
154 print >>hyperdb.DEBUG, "_opendb anydbm.open(%r, 'n')"%path
155 return anydbm.open(path, 'n')
157 # open the database with the correct module
158 try:
159 dbm = __import__(db_type)
160 except ImportError:
161 raise hyperdb.DatabaseError, \
162 "Couldn't open database - the required module '%s'"\
163 "is not available"%db_type
164 if __debug__:
165 print >>hyperdb.DEBUG, "_opendb %r.open(%r, %r)"%(db_type, path,
166 mode)
167 return dbm.open(path, mode)
169 def _lockdb(self, name):
170 ''' Lock a database file
171 '''
172 path = os.path.join(os.getcwd(), self.dir, '%s.lock'%name)
173 return acquire_lock(path)
175 #
176 # Node IDs
177 #
178 def newid(self, classname):
179 ''' Generate a new id for the given class
180 '''
181 # open the ids DB - create if if doesn't exist
182 lock = self._lockdb('_ids')
183 db = self._opendb('_ids', 'c')
184 if db.has_key(classname):
185 newid = db[classname] = str(int(db[classname]) + 1)
186 else:
187 # the count() bit is transitional - older dbs won't start at 1
188 newid = str(self.getclass(classname).count()+1)
189 db[classname] = newid
190 db.close()
191 release_lock(lock)
192 return newid
194 #
195 # Nodes
196 #
197 def addnode(self, classname, nodeid, node):
198 ''' add the specified node to its class's db
199 '''
200 if __debug__:
201 print >>hyperdb.DEBUG, 'addnode', (self, classname, nodeid, node)
202 self.newnodes.setdefault(classname, {})[nodeid] = 1
203 self.cache.setdefault(classname, {})[nodeid] = node
204 self.savenode(classname, nodeid, node)
206 def setnode(self, classname, nodeid, node):
207 ''' change the specified node
208 '''
209 if __debug__:
210 print >>hyperdb.DEBUG, 'setnode', (self, classname, nodeid, node)
211 self.dirtynodes.setdefault(classname, {})[nodeid] = 1
213 # can't set without having already loaded the node
214 self.cache[classname][nodeid] = node
215 self.savenode(classname, nodeid, node)
217 def savenode(self, classname, nodeid, node):
218 ''' perform the saving of data specified by the set/addnode
219 '''
220 if __debug__:
221 print >>hyperdb.DEBUG, 'savenode', (self, classname, nodeid, node)
222 self.transactions.append((self._doSaveNode, (classname, nodeid, node)))
    def getnode(self, classname, nodeid, db=None, cache=1):
        ''' get a node from the database

        'db' may be a handle already opened via getclassdb(); if None, a
        read-only handle is opened here.  When 'cache' is true, the node
        is served from (and stored into) the in-memory cache.  Raises
        IndexError if the node doesn't exist.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'getnode', (self, classname, nodeid, db)
        if cache:
            # try the cache
            cache_dict = self.cache.setdefault(classname, {})
            if cache_dict.has_key(nodeid):
                if __debug__:
                    print >>hyperdb.TRACE, 'get %s %s cached'%(classname,
                        nodeid)
                return cache_dict[nodeid]

        if __debug__:
            print >>hyperdb.TRACE, 'get %s %s'%(classname, nodeid)

        # get from the database and save in the cache
        if db is None:
            db = self.getclassdb(classname)
        if not db.has_key(nodeid):
            raise IndexError, "no such %s %s"%(classname, nodeid)

        # decode
        res = marshal.loads(db[nodeid])

        # reverse the serialisation
        res = self.unserialise(classname, res)

        # store off in the cache dict
        # (cache_dict is only bound when 'cache' is true, so this guard
        # also protects against a NameError)
        if cache:
            cache_dict[nodeid] = res

        return res
259 def hasnode(self, classname, nodeid, db=None):
260 ''' determine if the database has a given node
261 '''
262 if __debug__:
263 print >>hyperdb.DEBUG, 'hasnode', (self, classname, nodeid, db)
265 # try the cache
266 cache = self.cache.setdefault(classname, {})
267 if cache.has_key(nodeid):
268 if __debug__:
269 print >>hyperdb.TRACE, 'has %s %s cached'%(classname, nodeid)
270 return 1
271 if __debug__:
272 print >>hyperdb.TRACE, 'has %s %s'%(classname, nodeid)
274 # not in the cache - check the database
275 if db is None:
276 db = self.getclassdb(classname)
277 res = db.has_key(nodeid)
278 return res
280 def countnodes(self, classname, db=None):
281 if __debug__:
282 print >>hyperdb.DEBUG, 'countnodes', (self, classname, db)
283 # include the new nodes not saved to the DB yet
284 count = len(self.newnodes.get(classname, {}))
286 # and count those in the DB
287 if db is None:
288 db = self.getclassdb(classname)
289 count = count + len(db.keys())
290 return count
292 def getnodeids(self, classname, db=None):
293 if __debug__:
294 print >>hyperdb.DEBUG, 'getnodeids', (self, classname, db)
295 # start off with the new nodes
296 res = self.newnodes.get(classname, {}).keys()
298 if db is None:
299 db = self.getclassdb(classname)
300 res = res + db.keys()
301 return res
304 #
305 # Files - special node properties
306 # inherited from FileStorage
308 #
309 # Journal
310 #
311 def addjournal(self, classname, nodeid, action, params):
312 ''' Journal the Action
313 'action' may be:
315 'create' or 'set' -- 'params' is a dictionary of property values
316 'link' or 'unlink' -- 'params' is (classname, nodeid, propname)
317 'retire' -- 'params' is None
318 '''
319 if __debug__:
320 print >>hyperdb.DEBUG, 'addjournal', (self, classname, nodeid,
321 action, params)
322 self.transactions.append((self._doSaveJournal, (classname, nodeid,
323 action, params)))
325 def getjournal(self, classname, nodeid):
326 ''' get the journal for id
327 '''
328 if __debug__:
329 print >>hyperdb.DEBUG, 'getjournal', (self, classname, nodeid)
330 # attempt to open the journal - in some rare cases, the journal may
331 # not exist
332 try:
333 db = self._opendb('journals.%s'%classname, 'r')
334 except anydbm.error, error:
335 if str(error) == "need 'c' or 'n' flag to open new db": return []
336 elif error.args[0] != 2: raise
337 return []
338 try:
339 journal = marshal.loads(db[nodeid])
340 except KeyError:
341 raise KeyError, 'no such %s %s'%(classname, nodeid)
342 res = []
343 for entry in journal:
344 (nodeid, date_stamp, user, action, params) = entry
345 date_obj = date.Date(date_stamp)
346 res.append((nodeid, date_obj, user, action, params))
347 return res
349 def pack(self, pack_before):
350 ''' delete all journal entries before 'pack_before' '''
351 if __debug__:
352 print >>hyperdb.DEBUG, 'packjournal', (self, pack_before)
354 pack_before = pack_before.get_tuple()
356 classes = self.getclasses()
358 # TODO: factor this out to method - we're already doing it in
359 # _opendb.
360 db_type = ''
361 path = os.path.join(os.getcwd(), self.dir, classes[0])
362 if os.path.exists(path):
363 db_type = whichdb.whichdb(path)
364 if not db_type:
365 raise hyperdb.DatabaseError, "Couldn't identify database type"
366 elif os.path.exists(path+'.db'):
367 db_type = 'dbm'
369 for classname in classes:
370 db_name = 'journals.%s'%classname
371 db = self._opendb(db_name, 'w')
373 for key in db.keys():
374 journal = marshal.loads(db[key])
375 l = []
376 last_set_entry = None
377 for entry in journal:
378 (nodeid, date_stamp, self.journaltag, action,
379 params) = entry
380 if date_stamp > pack_before or action == 'create':
381 l.append(entry)
382 elif action == 'set':
383 # grab the last set entry to keep information on
384 # activity
385 last_set_entry = entry
386 if last_set_entry:
387 date_stamp = last_set_entry[1]
388 # if the last set entry was made after the pack date
389 # then it is already in the list
390 if date_stamp < pack_before:
391 l.append(last_set_entry)
392 db[key] = marshal.dumps(l)
393 if db_type == 'gdbm':
394 db.reorganize()
395 db.close()
398 #
399 # Basic transaction support
400 #
    def commit(self):
        ''' Commit the current transactions.

        Runs each queued (method, args) pair from self.transactions,
        sharing open db handles between steps via self.databases, then
        closes the handles and resets all transaction state.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'commit', (self,)
        # TODO: lock the DB

        # keep a handle to all the database files opened
        # (_doSaveNode/_doSaveJournal look handles up here)
        self.databases = {}

        # now, do all the transactions
        for method, args in self.transactions:
            method(*args)

        # now close all the database files
        for db in self.databases.values():
            db.close()
        del self.databases
        # TODO: unlock the DB

        # all transactions committed, back to normal
        self.cache = {}
        self.dirtynodes = {}
        self.newnodes = {}
        self.transactions = []
427 def _doSaveNode(self, classname, nodeid, node):
428 if __debug__:
429 print >>hyperdb.DEBUG, '_doSaveNode', (self, classname, nodeid,
430 node)
432 # get the database handle
433 db_name = 'nodes.%s'%classname
434 if self.databases.has_key(db_name):
435 db = self.databases[db_name]
436 else:
437 db = self.databases[db_name] = self.getclassdb(classname, 'c')
439 # now save the marshalled data
440 db[nodeid] = marshal.dumps(self.serialise(classname, node))
442 def _doSaveJournal(self, classname, nodeid, action, params):
443 # serialise first
444 if action in ('set', 'create'):
445 params = self.serialise(classname, params)
447 # create the journal entry
448 entry = (nodeid, date.Date().get_tuple(), self.journaltag, action,
449 params)
451 if __debug__:
452 print >>hyperdb.DEBUG, '_doSaveJournal', entry
454 # get the database handle
455 db_name = 'journals.%s'%classname
456 if self.databases.has_key(db_name):
457 db = self.databases[db_name]
458 else:
459 db = self.databases[db_name] = self._opendb(db_name, 'c')
461 # now insert the journal entry
462 if db.has_key(nodeid):
463 # append to existing
464 s = db[nodeid]
465 l = marshal.loads(s)
466 l.append(entry)
467 else:
468 l = [entry]
470 db[nodeid] = marshal.dumps(l)
472 def rollback(self):
473 ''' Reverse all actions from the current transaction.
474 '''
475 if __debug__:
476 print >>hyperdb.DEBUG, 'rollback', (self, )
477 for method, args in self.transactions:
478 # delete temporary files
479 if method == self._doStoreFile:
480 if os.path.exists(args[0]+".tmp"):
481 os.remove(args[0]+".tmp")
482 self.cache = {}
483 self.dirtynodes = {}
484 self.newnodes = {}
485 self.transactions = []
487 #
488 #$Log: not supported by cvs2svn $
489 #Revision 1.36 2002/06/19 03:07:19 richard
490 #Moved the file storage commit into blobfiles where it belongs.
491 #
492 #Revision 1.35 2002/05/25 07:16:24 rochecompaan
493 #Merged search_indexing-branch with HEAD
494 #
495 #Revision 1.34 2002/05/15 06:21:21 richard
496 # . node caching now works, and gives a small boost in performance
497 #
498 #As a part of this, I cleaned up the DEBUG output and implemented TRACE
499 #output (HYPERDBTRACE='file to trace to') with checkpoints at the start of
500 #CGI requests. Run roundup with python -O to skip all the DEBUG/TRACE stuff
501 #(using if __debug__ which is compiled out with -O)
502 #
503 #Revision 1.33 2002/04/24 10:38:26 rochecompaan
504 #All database files are now created group readable and writable.
505 #
506 #Revision 1.32 2002/04/15 23:25:15 richard
507 #. node ids are now generated from a lockable store - no more race conditions
508 #
509 #We're using the portalocker code by Jonathan Feinberg that was contributed
510 #to the ASPN Python cookbook. This gives us locking across Unix and Windows.
511 #
512 #Revision 1.31 2002/04/03 05:54:31 richard
513 #Fixed serialisation problem by moving the serialisation step out of the
514 #hyperdb.Class (get, set) into the hyperdb.Database.
515 #
516 #Also fixed htmltemplate after the showid changes I made yesterday.
517 #
518 #Unit tests for all of the above written.
519 #
520 #Revision 1.30.2.1 2002/04/03 11:55:57 rochecompaan
521 # . Added feature #526730 - search for messages capability
522 #
523 #Revision 1.30 2002/02/27 03:40:59 richard
524 #Ran it through pychecker, made fixes
525 #
526 #Revision 1.29 2002/02/25 14:34:31 grubert
527 # . use blobfiles in back_anydbm which is used in back_bsddb.
528 # change test_db as dirlist does not work for subdirectories.
529 # ATTENTION: blobfiles now creates subdirectories for files.
530 #
531 #Revision 1.28 2002/02/16 09:14:17 richard
532 # . #514854 ] History: "User" is always ticket creator
533 #
534 #Revision 1.27 2002/01/22 07:21:13 richard
535 #. fixed back_bsddb so it passed the journal tests
536 #
537 #... it didn't seem happy using the back_anydbm _open method, which is odd.
538 #Yet another occurrance of whichdb not being able to recognise older bsddb
539 #databases. Yadda yadda. Made the HYPERDBDEBUG stuff more sane in the
540 #process.
541 #
542 #Revision 1.26 2002/01/22 05:18:38 rochecompaan
543 #last_set_entry was referenced before assignment
544 #
545 #Revision 1.25 2002/01/22 05:06:08 rochecompaan
546 #We need to keep the last 'set' entry in the journal to preserve
547 #information on 'activity' for nodes.
548 #
549 #Revision 1.24 2002/01/21 16:33:20 rochecompaan
550 #You can now use the roundup-admin tool to pack the database
551 #
552 #Revision 1.23 2002/01/18 04:32:04 richard
553 #Rollback was breaking because a message hadn't actually been written to the file. Needs
554 #more investigation.
555 #
556 #Revision 1.22 2002/01/14 02:20:15 richard
557 # . changed all config accesses so they access either the instance or the
558 # config attriubute on the db. This means that all config is obtained from
559 # instance_config instead of the mish-mash of classes. This will make
560 # switching to a ConfigParser setup easier too, I hope.
561 #
562 #At a minimum, this makes migration a _little_ easier (a lot easier in the
563 #0.5.0 switch, I hope!)
564 #
565 #Revision 1.21 2002/01/02 02:31:38 richard
566 #Sorry for the huge checkin message - I was only intending to implement #496356
567 #but I found a number of places where things had been broken by transactions:
568 # . modified ROUNDUPDBSENDMAILDEBUG to be SENDMAILDEBUG and hold a filename
569 # for _all_ roundup-generated smtp messages to be sent to.
570 # . the transaction cache had broken the roundupdb.Class set() reactors
571 # . newly-created author users in the mailgw weren't being committed to the db
572 #
573 #Stuff that made it into CHANGES.txt (ie. the stuff I was actually working
574 #on when I found that stuff :):
575 # . #496356 ] Use threading in messages
576 # . detectors were being registered multiple times
577 # . added tests for mailgw
578 # . much better attaching of erroneous messages in the mail gateway
579 #
580 #Revision 1.20 2001/12/18 15:30:34 rochecompaan
581 #Fixed bugs:
582 # . Fixed file creation and retrieval in same transaction in anydbm
583 # backend
584 # . Cgi interface now renders new issue after issue creation
585 # . Could not set issue status to resolved through cgi interface
586 # . Mail gateway was changing status back to 'chatting' if status was
587 # omitted as an argument
588 #
589 #Revision 1.19 2001/12/17 03:52:48 richard
590 #Implemented file store rollback. As a bonus, the hyperdb is now capable of
591 #storing more than one file per node - if a property name is supplied,
592 #the file is called designator.property.
593 #I decided not to migrate the existing files stored over to the new naming
594 #scheme - the FileClass just doesn't specify the property name.
595 #
596 #Revision 1.18 2001/12/16 10:53:38 richard
597 #take a copy of the node dict so that the subsequent set
598 #operation doesn't modify the oldvalues structure
599 #
600 #Revision 1.17 2001/12/14 23:42:57 richard
601 #yuck, a gdbm instance tests false :(
602 #I've left the debugging code in - it should be removed one day if we're ever
603 #_really_ anal about performace :)
604 #
605 #Revision 1.16 2001/12/12 03:23:14 richard
606 #Cor blimey this anydbm/whichdb stuff is yecchy. Turns out that whichdb
607 #incorrectly identifies a dbm file as a dbhash file on my system. This has
608 #been submitted to the python bug tracker as issue #491888:
609 #https://sourceforge.net/tracker/index.php?func=detail&aid=491888&group_id=5470&atid=105470
610 #
611 #Revision 1.15 2001/12/12 02:30:51 richard
612 #I fixed the problems with people whose anydbm was using the dbm module at the
613 #backend. It turns out the dbm module modifies the file name to append ".db"
614 #and my check to determine if we're opening an existing or new db just
615 #tested os.path.exists() on the filename. Well, no longer! We now perform a
616 #much better check _and_ cope with the anydbm implementation module changing
617 #too!
618 #I also fixed the backends __init__ so only ImportError is squashed.
619 #
620 #Revision 1.14 2001/12/10 22:20:01 richard
621 #Enabled transaction support in the bsddb backend. It uses the anydbm code
622 #where possible, only replacing methods where the db is opened (it uses the
623 #btree opener specifically.)
624 #Also cleaned up some change note generation.
625 #Made the backends package work with pydoc too.
626 #
627 #Revision 1.13 2001/12/02 05:06:16 richard
628 #. We now use weakrefs in the Classes to keep the database reference, so
629 # the close() method on the database is no longer needed.
630 # I bumped the minimum python requirement up to 2.1 accordingly.
631 #. #487480 ] roundup-server
632 #. #487476 ] INSTALL.txt
633 #
634 #I also cleaned up the change message / post-edit stuff in the cgi client.
635 #There's now a clearly marked "TODO: append the change note" where I believe
636 #the change note should be added there. The "changes" list will obviously
637 #have to be modified to be a dict of the changes, or somesuch.
638 #
639 #More testing needed.
640 #
641 #Revision 1.12 2001/12/01 07:17:50 richard
642 #. We now have basic transaction support! Information is only written to
643 # the database when the commit() method is called. Only the anydbm
644 # backend is modified in this way - neither of the bsddb backends have been.
645 # The mail, admin and cgi interfaces all use commit (except the admin tool
646 # doesn't have a commit command, so interactive users can't commit...)
647 #. Fixed login/registration forwarding the user to the right page (or not,
648 # on a failure)
649 #
650 #Revision 1.11 2001/11/21 02:34:18 richard
651 #Added a target version field to the extended issue schema
652 #
653 #Revision 1.10 2001/10/09 23:58:10 richard
654 #Moved the data stringification up into the hyperdb.Class class' get, set
655 #and create methods. This means that the data is also stringified for the
656 #journal call, and removes duplication of code from the backends. The
657 #backend code now only sees strings.
658 #
659 #Revision 1.9 2001/10/09 07:25:59 richard
660 #Added the Password property type. See "pydoc roundup.password" for
661 #implementation details. Have updated some of the documentation too.
662 #
663 #Revision 1.8 2001/09/29 13:27:00 richard
664 #CGI interfaces now spit up a top-level index of all the instances they can
665 #serve.
666 #
667 #Revision 1.7 2001/08/12 06:32:36 richard
668 #using isinstance(blah, Foo) now instead of isFooType
669 #
670 #Revision 1.6 2001/08/07 00:24:42 richard
671 #stupid typo
672 #
673 #Revision 1.5 2001/08/07 00:15:51 richard
674 #Added the copyright/license notice to (nearly) all files at request of
675 #Bizar Software.
676 #
677 #Revision 1.4 2001/07/30 01:41:36 richard
678 #Makes schema changes mucho easier.
679 #
680 #Revision 1.3 2001/07/25 01:23:07 richard
681 #Added the Roundup spec to the new documentation directory.
682 #
683 #Revision 1.2 2001/07/23 08:20:44 richard
684 #Moved over to using marshal in the bsddb and anydbm backends.
685 #roundup-admin now has a "freshen" command that'll load/save all nodes (not
686 # retired - mod hyperdb.Class.list() so it lists retired nodes)
687 #
688 #