1 #
2 # Copyright (c) 2001 Bizar Software Pty Ltd (http://www.bizarsoftware.com.au/)
3 # This module is free software, and you may redistribute it and/or modify
4 # under the same terms as Python, so long as this copyright message and
5 # disclaimer are retained in their original form.
6 #
7 # IN NO EVENT SHALL BIZAR SOFTWARE PTY LTD BE LIABLE TO ANY PARTY FOR
8 # DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING
9 # OUT OF THE USE OF THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE
10 # POSSIBILITY OF SUCH DAMAGE.
11 #
12 # BIZAR SOFTWARE PTY LTD SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
13 # BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
14 # FOR A PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS"
15 # BASIS, AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
16 # SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
17 #
18 #$Id: back_anydbm.py,v 1.35 2002-05-25 07:16:24 rochecompaan Exp $
19 '''
20 This module defines a backend that saves the hyperdatabase in a database
21 chosen by anydbm. It is guaranteed to always be available in python
22 versions >2.1.1 (the dumbdbm fallback in 2.1.1 and earlier has several
23 serious bugs, and is not available)
24 '''
26 import whichdb, anydbm, os, marshal
27 from roundup import hyperdb, date
28 from blobfiles import FileStorage
29 from roundup.roundup_indexer import RoundupIndexer
30 from locking import acquire_lock, release_lock
32 #
33 # Now the database
34 #
class Database(FileStorage, hyperdb.Database):
    """A database for storing records containing flexible data types.

    Storage is a set of anydbm-chosen dbm files living in config.DATABASE:
    one "nodes.<class>" and one "journals.<class>" file per class, with
    node data serialised via marshal.  Writes are queued and only hit disk
    on commit().

    Transaction stuff TODO:
    . check the timestamp of the class file and nuke the cache if it's
      modified. Do some sort of conflict checking on the dirty stuff.
    . perhaps detect write collisions (related to above)?
    """
    def __init__(self, config, journaltag=None):
        """Open a hyperdatabase given a specifier to some storage.

        The storage location is obtained from config.DATABASE.  Its
        meaning depends on the particular implementation of the
        hyperdatabase; here it is a directory path holding the dbm files.

        The 'journaltag' is a token that will be attached to the journal
        entries for any edits done on the database. If 'journaltag' is
        None, the database is opened in read-only mode: the Class.create(),
        Class.set(), and Class.retire() methods are disabled.
        """
        self.config, self.journaltag = config, journaltag
        self.dir = config.DATABASE
        self.classes = {}       # classname -> Class object (see addclass)
        self.cache = {}         # cache of nodes loaded or created
        self.dirtynodes = {}    # keep track of the dirty nodes by class
        self.newnodes = {}      # keep track of the new nodes by class
        self.transactions = []  # queued (method, args) pairs until commit()
        self.indexer = RoundupIndexer(self.dir)
        # ensure files are group readable and writable
        os.umask(0002)
69 def __repr__(self):
70 return '<back_anydbm instance at %x>'%id(self)
72 #
73 # Classes
74 #
75 def __getattr__(self, classname):
76 """A convenient way of calling self.getclass(classname)."""
77 if self.classes.has_key(classname):
78 if __debug__:
79 print >>hyperdb.DEBUG, '__getattr__', (self, classname)
80 return self.classes[classname]
81 raise AttributeError, classname
83 def addclass(self, cl):
84 if __debug__:
85 print >>hyperdb.DEBUG, 'addclass', (self, cl)
86 cn = cl.classname
87 if self.classes.has_key(cn):
88 raise ValueError, cn
89 self.classes[cn] = cl
91 def getclasses(self):
92 """Return a list of the names of all existing classes."""
93 if __debug__:
94 print >>hyperdb.DEBUG, 'getclasses', (self,)
95 l = self.classes.keys()
96 l.sort()
97 return l
    def getclass(self, classname):
        """Get the Class object representing a particular class.

        If 'classname' is not a valid class name, a KeyError is raised.
        """
        if __debug__:
            print >>hyperdb.DEBUG, 'getclass', (self, classname)
        return self.classes[classname]
108 #
109 # Class DBs
110 #
111 def clear(self):
112 '''Delete all database contents
113 '''
114 if __debug__:
115 print >>hyperdb.DEBUG, 'clear', (self,)
116 for cn in self.classes.keys():
117 for dummy in 'nodes', 'journals':
118 path = os.path.join(self.dir, 'journals.%s'%cn)
119 if os.path.exists(path):
120 os.remove(path)
121 elif os.path.exists(path+'.db'): # dbm appends .db
122 os.remove(path+'.db')
    def getclassdb(self, classname, mode='r'):
        ''' grab a connection to the class db that will be used for
            multiple actions

            'mode' is the dbm open mode (read-only by default); the
            caller is responsible for closing the returned handle.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'getclassdb', (self, classname, mode)
        return self._opendb('nodes.%s'%classname, mode)
132 def _opendb(self, name, mode):
133 '''Low-level database opener that gets around anydbm/dbm
134 eccentricities.
135 '''
136 if __debug__:
137 print >>hyperdb.DEBUG, '_opendb', (self, name, mode)
139 # determine which DB wrote the class file
140 db_type = ''
141 path = os.path.join(os.getcwd(), self.dir, name)
142 if os.path.exists(path):
143 db_type = whichdb.whichdb(path)
144 if not db_type:
145 raise hyperdb.DatabaseError, "Couldn't identify database type"
146 elif os.path.exists(path+'.db'):
147 # if the path ends in '.db', it's a dbm database, whether
148 # anydbm says it's dbhash or not!
149 db_type = 'dbm'
151 # new database? let anydbm pick the best dbm
152 if not db_type:
153 if __debug__:
154 print >>hyperdb.DEBUG, "_opendb anydbm.open(%r, 'n')"%path
155 return anydbm.open(path, 'n')
157 # open the database with the correct module
158 try:
159 dbm = __import__(db_type)
160 except ImportError:
161 raise hyperdb.DatabaseError, \
162 "Couldn't open database - the required module '%s'"\
163 "is not available"%db_type
164 if __debug__:
165 print >>hyperdb.DEBUG, "_opendb %r.open(%r, %r)"%(db_type, path,
166 mode)
167 return dbm.open(path, mode)
169 def _lockdb(self, name):
170 ''' Lock a database file
171 '''
172 path = os.path.join(os.getcwd(), self.dir, '%s.lock'%name)
173 return acquire_lock(path)
175 #
176 # Node IDs
177 #
178 def newid(self, classname):
179 ''' Generate a new id for the given class
180 '''
181 # open the ids DB - create if if doesn't exist
182 lock = self._lockdb('_ids')
183 db = self._opendb('_ids', 'c')
184 if db.has_key(classname):
185 newid = db[classname] = str(int(db[classname]) + 1)
186 else:
187 # the count() bit is transitional - older dbs won't start at 1
188 newid = str(self.getclass(classname).count()+1)
189 db[classname] = newid
190 db.close()
191 release_lock(lock)
192 return newid
194 #
195 # Nodes
196 #
197 def addnode(self, classname, nodeid, node):
198 ''' add the specified node to its class's db
199 '''
200 if __debug__:
201 print >>hyperdb.DEBUG, 'addnode', (self, classname, nodeid, node)
202 self.newnodes.setdefault(classname, {})[nodeid] = 1
203 self.cache.setdefault(classname, {})[nodeid] = node
204 self.savenode(classname, nodeid, node)
206 def setnode(self, classname, nodeid, node):
207 ''' change the specified node
208 '''
209 if __debug__:
210 print >>hyperdb.DEBUG, 'setnode', (self, classname, nodeid, node)
211 self.dirtynodes.setdefault(classname, {})[nodeid] = 1
213 # can't set without having already loaded the node
214 self.cache[classname][nodeid] = node
215 self.savenode(classname, nodeid, node)
    def savenode(self, classname, nodeid, node):
        ''' perform the saving of data specified by the set/addnode

            The write is only queued as a transaction here; nothing hits
            disk until commit() runs.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'savenode', (self, classname, nodeid, node)
        self.transactions.append((self._doSaveNode, (classname, nodeid, node)))
    def getnode(self, classname, nodeid, db=None, cache=1):
        ''' get a node from the database

            'db' may be an already-open class db handle; if None, one is
            opened read-only.  When 'cache' is true (the default) the
            in-memory cache is consulted first and the loaded node is
            stored back into it.  Raises IndexError if the node does not
            exist.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'getnode', (self, classname, nodeid, db)
        if cache:
            # try the cache
            cache_dict = self.cache.setdefault(classname, {})
            if cache_dict.has_key(nodeid):
                if __debug__:
                    print >>hyperdb.TRACE, 'get %s %s cached'%(classname,
                        nodeid)
                return cache_dict[nodeid]

        if __debug__:
            print >>hyperdb.TRACE, 'get %s %s'%(classname, nodeid)

        # get from the database and save in the cache
        if db is None:
            db = self.getclassdb(classname)
        if not db.has_key(nodeid):
            raise IndexError, "no such %s %s"%(classname, nodeid)

        # decode
        res = marshal.loads(db[nodeid])

        # reverse the serialisation
        res = self.unserialise(classname, res)

        # store off in the cache dict
        if cache:
            cache_dict[nodeid] = res

        return res
259 def hasnode(self, classname, nodeid, db=None):
260 ''' determine if the database has a given node
261 '''
262 if __debug__:
263 print >>hyperdb.DEBUG, 'hasnode', (self, classname, nodeid, db)
265 # try the cache
266 cache = self.cache.setdefault(classname, {})
267 if cache.has_key(nodeid):
268 if __debug__:
269 print >>hyperdb.TRACE, 'has %s %s cached'%(classname, nodeid)
270 return 1
271 if __debug__:
272 print >>hyperdb.TRACE, 'has %s %s'%(classname, nodeid)
274 # not in the cache - check the database
275 if db is None:
276 db = self.getclassdb(classname)
277 res = db.has_key(nodeid)
278 return res
280 def countnodes(self, classname, db=None):
281 if __debug__:
282 print >>hyperdb.DEBUG, 'countnodes', (self, classname, db)
283 # include the new nodes not saved to the DB yet
284 count = len(self.newnodes.get(classname, {}))
286 # and count those in the DB
287 if db is None:
288 db = self.getclassdb(classname)
289 count = count + len(db.keys())
290 return count
292 def getnodeids(self, classname, db=None):
293 if __debug__:
294 print >>hyperdb.DEBUG, 'getnodeids', (self, classname, db)
295 # start off with the new nodes
296 res = self.newnodes.get(classname, {}).keys()
298 if db is None:
299 db = self.getclassdb(classname)
300 res = res + db.keys()
301 return res
304 #
305 # Files - special node properties
306 # inherited from FileStorage
308 #
309 # Journal
310 #
    def addjournal(self, classname, nodeid, action, params):
        ''' Journal the Action
        'action' may be:

            'create' or 'set' -- 'params' is a dictionary of property values
            'link' or 'unlink' -- 'params' is (classname, nodeid, propname)
            'retire' -- 'params' is None

        The entry is only queued as a transaction here; nothing is
        written until commit() runs.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'addjournal', (self, classname, nodeid,
                action, params)
        self.transactions.append((self._doSaveJournal, (classname, nodeid,
            action, params)))
    def getjournal(self, classname, nodeid):
        ''' get the journal for id

            Returns a list of (nodeid, date.Date, user, action, params)
            tuples, or an empty list if the journal db doesn't exist yet.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'getjournal', (self, classname, nodeid)
        # attempt to open the journal - in some rare cases, the journal may
        # not exist
        try:
            db = self._opendb('journals.%s'%classname, 'r')
        except anydbm.error, error:
            # dbhash reports a missing file with this message; dbm sets
            # errno 2 (ENOENT) - both just mean "no journal yet"
            if str(error) == "need 'c' or 'n' flag to open new db": return []
            elif error.args[0] != 2: raise
            return []
        journal = marshal.loads(db[nodeid])
        res = []
        for entry in journal:
            (nodeid, date_stamp, user, action, params) = entry
            # date stamps are stored as tuples - rehydrate to Date objects
            date_obj = date.Date(date_stamp)
            res.append((nodeid, date_obj, user, action, params))
        return res
346 def pack(self, pack_before):
347 ''' delete all journal entries before 'pack_before' '''
348 if __debug__:
349 print >>hyperdb.DEBUG, 'packjournal', (self, pack_before)
351 pack_before = pack_before.get_tuple()
353 classes = self.getclasses()
355 # TODO: factor this out to method - we're already doing it in
356 # _opendb.
357 db_type = ''
358 path = os.path.join(os.getcwd(), self.dir, classes[0])
359 if os.path.exists(path):
360 db_type = whichdb.whichdb(path)
361 if not db_type:
362 raise hyperdb.DatabaseError, "Couldn't identify database type"
363 elif os.path.exists(path+'.db'):
364 db_type = 'dbm'
366 for classname in classes:
367 db_name = 'journals.%s'%classname
368 db = self._opendb(db_name, 'w')
370 for key in db.keys():
371 journal = marshal.loads(db[key])
372 l = []
373 last_set_entry = None
374 for entry in journal:
375 (nodeid, date_stamp, self.journaltag, action,
376 params) = entry
377 if date_stamp > pack_before or action == 'create':
378 l.append(entry)
379 elif action == 'set':
380 # grab the last set entry to keep information on
381 # activity
382 last_set_entry = entry
383 if last_set_entry:
384 date_stamp = last_set_entry[1]
385 # if the last set entry was made after the pack date
386 # then it is already in the list
387 if date_stamp < pack_before:
388 l.append(last_set_entry)
389 db[key] = marshal.dumps(l)
390 if db_type == 'gdbm':
391 db.reorganize()
392 db.close()
395 #
396 # Basic transaction support
397 #
    def commit(self):
        ''' Commit the current transactions.

            Runs every queued (method, args) pair; the workers share
            opened db handles through self.databases, which is closed and
            discarded afterwards.  All transaction state (cache, dirty
            and new node records, queue) is then reset.
        '''
        if __debug__:
            print >>hyperdb.DEBUG, 'commit', (self,)
        # TODO: lock the DB

        # keep a handle to all the database files opened
        self.databases = {}

        # now, do all the transactions
        for method, args in self.transactions:
            method(*args)

        # now close all the database files
        for db in self.databases.values():
            db.close()
        del self.databases
        # TODO: unlock the DB

        # all transactions committed, back to normal
        self.cache = {}
        self.dirtynodes = {}
        self.newnodes = {}
        self.transactions = []
424 def _doSaveNode(self, classname, nodeid, node):
425 if __debug__:
426 print >>hyperdb.DEBUG, '_doSaveNode', (self, classname, nodeid,
427 node)
429 # get the database handle
430 db_name = 'nodes.%s'%classname
431 if self.databases.has_key(db_name):
432 db = self.databases[db_name]
433 else:
434 db = self.databases[db_name] = self.getclassdb(classname, 'c')
436 # now save the marshalled data
437 db[nodeid] = marshal.dumps(self.serialise(classname, node))
439 def _doSaveJournal(self, classname, nodeid, action, params):
440 # serialise first
441 if action in ('set', 'create'):
442 params = self.serialise(classname, params)
444 # create the journal entry
445 entry = (nodeid, date.Date().get_tuple(), self.journaltag, action,
446 params)
448 if __debug__:
449 print >>hyperdb.DEBUG, '_doSaveJournal', entry
451 # get the database handle
452 db_name = 'journals.%s'%classname
453 if self.databases.has_key(db_name):
454 db = self.databases[db_name]
455 else:
456 db = self.databases[db_name] = self._opendb(db_name, 'c')
458 # now insert the journal entry
459 if db.has_key(nodeid):
460 # append to existing
461 s = db[nodeid]
462 l = marshal.loads(s)
463 l.append(entry)
464 else:
465 l = [entry]
467 db[nodeid] = marshal.dumps(l)
469 def _doStoreFile(self, name, **databases):
470 # the file is currently ".tmp" - move it to its real name to commit
471 os.rename(name+".tmp", name)
472 pattern = name.split('/')[-1]
473 self.indexer.add_files(dir=os.path.dirname(name), pattern=pattern)
474 self.indexer.save_index()
476 def rollback(self):
477 ''' Reverse all actions from the current transaction.
478 '''
479 if __debug__:
480 print >>hyperdb.DEBUG, 'rollback', (self, )
481 for method, args in self.transactions:
482 # delete temporary files
483 if method == self._doStoreFile:
484 if os.path.exists(args[0]+".tmp"):
485 os.remove(args[0]+".tmp")
486 self.cache = {}
487 self.dirtynodes = {}
488 self.newnodes = {}
489 self.transactions = []
491 #
492 #$Log: not supported by cvs2svn $
493 #Revision 1.34 2002/05/15 06:21:21 richard
494 # . node caching now works, and gives a small boost in performance
495 #
496 #As a part of this, I cleaned up the DEBUG output and implemented TRACE
497 #output (HYPERDBTRACE='file to trace to') with checkpoints at the start of
498 #CGI requests. Run roundup with python -O to skip all the DEBUG/TRACE stuff
499 #(using if __debug__ which is compiled out with -O)
500 #
501 #Revision 1.33 2002/04/24 10:38:26 rochecompaan
502 #All database files are now created group readable and writable.
503 #
504 #Revision 1.32 2002/04/15 23:25:15 richard
505 #. node ids are now generated from a lockable store - no more race conditions
506 #
507 #We're using the portalocker code by Jonathan Feinberg that was contributed
508 #to the ASPN Python cookbook. This gives us locking across Unix and Windows.
509 #
510 #Revision 1.31 2002/04/03 05:54:31 richard
511 #Fixed serialisation problem by moving the serialisation step out of the
512 #hyperdb.Class (get, set) into the hyperdb.Database.
513 #
514 #Also fixed htmltemplate after the showid changes I made yesterday.
515 #
516 #Unit tests for all of the above written.
517 #
518 #Revision 1.30.2.1 2002/04/03 11:55:57 rochecompaan
519 # . Added feature #526730 - search for messages capability
520 #
521 #Revision 1.30 2002/02/27 03:40:59 richard
522 #Ran it through pychecker, made fixes
523 #
524 #Revision 1.29 2002/02/25 14:34:31 grubert
525 # . use blobfiles in back_anydbm which is used in back_bsddb.
526 # change test_db as dirlist does not work for subdirectories.
527 # ATTENTION: blobfiles now creates subdirectories for files.
528 #
529 #Revision 1.28 2002/02/16 09:14:17 richard
530 # . #514854 ] History: "User" is always ticket creator
531 #
532 #Revision 1.27 2002/01/22 07:21:13 richard
533 #. fixed back_bsddb so it passed the journal tests
534 #
535 #... it didn't seem happy using the back_anydbm _open method, which is odd.
536 #Yet another occurrance of whichdb not being able to recognise older bsddb
537 #databases. Yadda yadda. Made the HYPERDBDEBUG stuff more sane in the
538 #process.
539 #
540 #Revision 1.26 2002/01/22 05:18:38 rochecompaan
541 #last_set_entry was referenced before assignment
542 #
543 #Revision 1.25 2002/01/22 05:06:08 rochecompaan
544 #We need to keep the last 'set' entry in the journal to preserve
545 #information on 'activity' for nodes.
546 #
547 #Revision 1.24 2002/01/21 16:33:20 rochecompaan
548 #You can now use the roundup-admin tool to pack the database
549 #
550 #Revision 1.23 2002/01/18 04:32:04 richard
551 #Rollback was breaking because a message hadn't actually been written to the file. Needs
552 #more investigation.
553 #
554 #Revision 1.22 2002/01/14 02:20:15 richard
555 # . changed all config accesses so they access either the instance or the
556 # config attriubute on the db. This means that all config is obtained from
557 # instance_config instead of the mish-mash of classes. This will make
558 # switching to a ConfigParser setup easier too, I hope.
559 #
560 #At a minimum, this makes migration a _little_ easier (a lot easier in the
561 #0.5.0 switch, I hope!)
562 #
563 #Revision 1.21 2002/01/02 02:31:38 richard
564 #Sorry for the huge checkin message - I was only intending to implement #496356
565 #but I found a number of places where things had been broken by transactions:
566 # . modified ROUNDUPDBSENDMAILDEBUG to be SENDMAILDEBUG and hold a filename
567 # for _all_ roundup-generated smtp messages to be sent to.
568 # . the transaction cache had broken the roundupdb.Class set() reactors
569 # . newly-created author users in the mailgw weren't being committed to the db
570 #
571 #Stuff that made it into CHANGES.txt (ie. the stuff I was actually working
572 #on when I found that stuff :):
573 # . #496356 ] Use threading in messages
574 # . detectors were being registered multiple times
575 # . added tests for mailgw
576 # . much better attaching of erroneous messages in the mail gateway
577 #
578 #Revision 1.20 2001/12/18 15:30:34 rochecompaan
579 #Fixed bugs:
580 # . Fixed file creation and retrieval in same transaction in anydbm
581 # backend
582 # . Cgi interface now renders new issue after issue creation
583 # . Could not set issue status to resolved through cgi interface
584 # . Mail gateway was changing status back to 'chatting' if status was
585 # omitted as an argument
586 #
587 #Revision 1.19 2001/12/17 03:52:48 richard
588 #Implemented file store rollback. As a bonus, the hyperdb is now capable of
589 #storing more than one file per node - if a property name is supplied,
590 #the file is called designator.property.
591 #I decided not to migrate the existing files stored over to the new naming
592 #scheme - the FileClass just doesn't specify the property name.
593 #
594 #Revision 1.18 2001/12/16 10:53:38 richard
595 #take a copy of the node dict so that the subsequent set
596 #operation doesn't modify the oldvalues structure
597 #
598 #Revision 1.17 2001/12/14 23:42:57 richard
599 #yuck, a gdbm instance tests false :(
600 #I've left the debugging code in - it should be removed one day if we're ever
601 #_really_ anal about performace :)
602 #
603 #Revision 1.16 2001/12/12 03:23:14 richard
604 #Cor blimey this anydbm/whichdb stuff is yecchy. Turns out that whichdb
605 #incorrectly identifies a dbm file as a dbhash file on my system. This has
606 #been submitted to the python bug tracker as issue #491888:
607 #https://sourceforge.net/tracker/index.php?func=detail&aid=491888&group_id=5470&atid=105470
608 #
609 #Revision 1.15 2001/12/12 02:30:51 richard
610 #I fixed the problems with people whose anydbm was using the dbm module at the
611 #backend. It turns out the dbm module modifies the file name to append ".db"
612 #and my check to determine if we're opening an existing or new db just
613 #tested os.path.exists() on the filename. Well, no longer! We now perform a
614 #much better check _and_ cope with the anydbm implementation module changing
615 #too!
616 #I also fixed the backends __init__ so only ImportError is squashed.
617 #
618 #Revision 1.14 2001/12/10 22:20:01 richard
619 #Enabled transaction support in the bsddb backend. It uses the anydbm code
620 #where possible, only replacing methods where the db is opened (it uses the
621 #btree opener specifically.)
622 #Also cleaned up some change note generation.
623 #Made the backends package work with pydoc too.
624 #
625 #Revision 1.13 2001/12/02 05:06:16 richard
626 #. We now use weakrefs in the Classes to keep the database reference, so
627 # the close() method on the database is no longer needed.
628 # I bumped the minimum python requirement up to 2.1 accordingly.
629 #. #487480 ] roundup-server
630 #. #487476 ] INSTALL.txt
631 #
632 #I also cleaned up the change message / post-edit stuff in the cgi client.
633 #There's now a clearly marked "TODO: append the change note" where I believe
634 #the change note should be added there. The "changes" list will obviously
635 #have to be modified to be a dict of the changes, or somesuch.
636 #
637 #More testing needed.
638 #
639 #Revision 1.12 2001/12/01 07:17:50 richard
640 #. We now have basic transaction support! Information is only written to
641 # the database when the commit() method is called. Only the anydbm
642 # backend is modified in this way - neither of the bsddb backends have been.
643 # The mail, admin and cgi interfaces all use commit (except the admin tool
644 # doesn't have a commit command, so interactive users can't commit...)
645 #. Fixed login/registration forwarding the user to the right page (or not,
646 # on a failure)
647 #
648 #Revision 1.11 2001/11/21 02:34:18 richard
649 #Added a target version field to the extended issue schema
650 #
651 #Revision 1.10 2001/10/09 23:58:10 richard
652 #Moved the data stringification up into the hyperdb.Class class' get, set
653 #and create methods. This means that the data is also stringified for the
654 #journal call, and removes duplication of code from the backends. The
655 #backend code now only sees strings.
656 #
657 #Revision 1.9 2001/10/09 07:25:59 richard
658 #Added the Password property type. See "pydoc roundup.password" for
659 #implementation details. Have updated some of the documentation too.
660 #
661 #Revision 1.8 2001/09/29 13:27:00 richard
662 #CGI interfaces now spit up a top-level index of all the instances they can
663 #serve.
664 #
665 #Revision 1.7 2001/08/12 06:32:36 richard
666 #using isinstance(blah, Foo) now instead of isFooType
667 #
668 #Revision 1.6 2001/08/07 00:24:42 richard
669 #stupid typo
670 #
671 #Revision 1.5 2001/08/07 00:15:51 richard
672 #Added the copyright/license notice to (nearly) all files at request of
673 #Bizar Software.
674 #
675 #Revision 1.4 2001/07/30 01:41:36 richard
676 #Makes schema changes mucho easier.
677 #
678 #Revision 1.3 2001/07/25 01:23:07 richard
679 #Added the Roundup spec to the new documentation directory.
680 #
681 #Revision 1.2 2001/07/23 08:20:44 richard
682 #Moved over to using marshal in the bsddb and anydbm backends.
683 #roundup-admin now has a "freshen" command that'll load/save all nodes (not
684 # retired - mod hyperdb.Class.list() so it lists retired nodes)
685 #
686 #