1:
2:
3: from OFS.Folder import Folder
4: from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
5: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
6: from AccessControl import ClassSecurityInfo
7: from AccessControl import getSecurityManager
8: from Globals import package_home
9:
10: from Ft.Xml.Domlette import NonvalidatingReader
11: from Ft.Xml.Domlette import PrettyPrint, Print
12: from Ft.Xml import EMPTY_NAMESPACE, Parse
13:
14: import Ft.Xml.XPath
15:
16: import os.path
17: import sys
18: import cgi
19: import urllib
20: import logging
21: import math
22:
23: import urlparse
24:
def logger(txt, method, txt2):
    """Log the concatenation of txt and txt2 at the level given by method.

    Fix: the previous implementation ignored `method` and always logged at
    INFO, so call sites passing logging.ERROR/WARNING were silently
    downgraded.

    @param txt: message prefix (usually the calling context)
    @param method: a logging level constant (logging.INFO, logging.ERROR, ...)
    @param txt2: message body
    """
    logging.log(method, txt + txt2)
28:
29:
def getInt(number, default=0):
    """Coerce number to int, returning default on conversion failure.

    Fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    only genuine conversion errors are caught now.

    @param number: value to convert (string, float, None, ...)
    @param default: value returned when conversion fails
    """
    try:
        return int(number)
    except (ValueError, TypeError):
        return default
36:
def getTextFromNode(nodename):
    """Return the concatenated text (CDATA) content of a DOM node.

    Returns the empty string when nodename is None or has no text children.
    """
    if nodename is None:
        return ""
    return "".join(child.data
                   for child in nodename.childNodes
                   if child.nodeType == child.TEXT_NODE)
47:
48:
def getParentDir(path):
    """Return path with its final '/'-separated component removed.

    A path without any '/' yields the empty string.
    """
    components = path.split('/')
    parent = '/'.join(components[:-1])
    return parent
52:
53:
54: import socket
55:
def urlopen(url, timeout=2):
    """urllib.urlopen with a temporary socket default timeout.

    Fixes two defects: the previous default timeout is now saved and
    restored (it used to be reset to a hard-coded 5 seconds), and the
    restore happens in a finally clause, so a failing urlopen no longer
    leaves the short timeout installed process-wide.

    @param url: url to open
    @param timeout: socket timeout in seconds while opening
    """
    saved = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlopen(url)
    finally:
        socket.setdefaulttimeout(saved)
62:
63:
64: ##
65: ## documentViewer class
66: ##
class documentViewer(Folder):
    """Document viewer: a Zope folder that displays scanned documents,
    serving image pages via digilib and (optionally) full text via an
    external text viewer."""
    #textViewerUrl="http://127.0.0.1:8080/HFQP/testXSLT/getPage?"

    # Zope product meta type shown in the add list
    meta_type="Document viewer"

    security=ClassSecurityInfo()
    manage_options=Folder.manage_options+(
        {'label':'main config','action':'changeDocumentViewerForm'},
        )

    # templates and forms
    viewer_main = PageTemplateFile('zpt/viewer_main', globals())
    thumbs_main = PageTemplateFile('zpt/thumbs_main', globals())
    image_main = PageTemplateFile('zpt/image_main', globals())
    head_main = PageTemplateFile('zpt/head_main', globals())
    docuviewer_css = PageTemplateFile('css/docuviewer.css', globals())
    info_xml = PageTemplateFile('zpt/info_xml', globals())

    security.declareProtected('View management screens','changeDocumentViewerForm')
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())


    def __init__(self,id,imageViewerUrl,textViewerUrl=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=10,authgroups="mpiwg"):
        """init document viewer

        @param id: Zope object id
        @param imageViewerUrl: base url of the (zogilib) image viewer
        @param textViewerUrl: base url of the text viewer (None = images only)
        @param digilibBaseUrl: base url of digilib; auto-detected from the
            image viewer when None
        @param thumbcols: columns on the thumbnail overview page
        @param thumbrows: rows on the thumbnail overview page
        @param authgroups: comma-separated list of authorized group names
        """
        self.id=id
        self.title=title
        self.imageViewerUrl=imageViewerUrl
        self.textViewerUrl=textViewerUrl

        # auto-detect the digilib URL from zogilib if not configured
        if not digilibBaseUrl:
            self.digilibBaseUrl = self.findDigilibUrl()
        else:
            self.digilibBaseUrl = digilibBaseUrl
        self.thumbcols = thumbcols
        self.thumbrows = thumbrows
        # authgroups is list of authorized groups (delimited by ,)
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
        # add template folder so we can always use template.something
        self.manage_addFolder('template')
107:
108:
109: security.declareProtected('View','index_html')
110: def index_html(self,mode,url,viewMode="auto",start=None,pn=1):
111: '''
112: view it
113: @param mode: defines how to access the document behind url
114: @param url: url which contains display information
115: @param viewMode: if images display images, if text display text, default is images (text,images or auto)
116:
117: '''
118:
119: logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
120:
121: if not hasattr(self, 'template'):
122: # create template folder if it doesn't exist
123: self.manage_addFolder('template')
124:
125: if not self.digilibBaseUrl:
126: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
127:
128: docinfo = self.getDocinfo(mode=mode,url=url)
129: pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
130: pt = getattr(self.template, 'viewer_main')
131:
132: if viewMode=="auto": # automodus gewaehlt
133: if docinfo.get("textURL",'') and self.textViewerUrl: #texturl gesetzt und textViewer konfiguriert
134: viewMode="text"
135: else:
136: viewMode="images"
137:
138: return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
139:
140:
141: def getLink(self,param=None,val=None):
142: """link to documentviewer with parameter param set to val"""
143: params=self.REQUEST.form.copy()
144: if param is not None:
145: if val is None:
146: if params.has_key(param):
147: del params[param]
148: else:
149: params[param] = str(val)
150:
151: # quote values and assemble into query string
152: ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
153: url=self.REQUEST['URL1']+"?"+ps
154: return url
155:
156:
157: def getInfo_xml(self,url,mode):
158: """returns info about the document as XML"""
159:
160: if not self.digilibBaseUrl:
161: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
162:
163: docinfo = self.getDocinfo(mode=mode,url=url)
164: pt = getattr(self.template, 'info_xml')
165: return pt(docinfo=docinfo)
166:
167:
168: def getStyle(self, idx, selected, style=""):
169: """returns a string with the given style and append 'sel' if path == selected."""
170: #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
171: if idx == selected:
172: return style + 'sel'
173: else:
174: return style
175:
176:
177: def isAccessible(self, docinfo):
178: """returns if access to the resource is granted"""
179: access = docinfo.get('accessType', None)
180: logger("documentViewer (accessOK)", logging.INFO, "access type %s"%access)
181: if access is not None and access == 'free':
182: logger("documentViewer (accessOK)", logging.INFO, "access is free")
183: return True
184: elif access is None or access in self.authgroups:
185: # only local access -- only logged in users
186: user = getSecurityManager().getUser()
187: if user is not None:
188: #print "user: ", user
189: return (user.getUserName() != "Anonymous User")
190: else:
191: return False
192:
193: logger("documentViewer (accessOK)", logging.INFO, "unknown access type %s"%access)
194: return False
195:
196:
197: def getDirinfoFromDigilib(self,path,docinfo=None):
198: """gibt param von dlInfo aus"""
199: num_retries = 3
200: if docinfo is None:
201: docinfo = {}
202:
203: infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
204:
205: logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo from %s"%(infoUrl))
206:
207: for cnt in range(num_retries):
208: try:
209: # dom = NonvalidatingReader.parseUri(imageUrl)
210: txt=urllib.urlopen(infoUrl).read()
211: dom = Parse(txt)
212: break
213: except:
214: logger("documentViewer (getdirinfofromdigilib)", logging.ERROR, "error reading %s (try %d)"%(infoUrl,cnt))
215: else:
216: raise IOError("Unable to get dir-info from %s"%(infoUrl))
217:
218: sizes=dom.xpath("//dir/size")
219: logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo:size"%sizes)
220:
221: if sizes:
222: docinfo['numPages'] = int(getTextFromNode(sizes[0]))
223: else:
224: docinfo['numPages'] = 0
225:
226: return docinfo
227:
228:
229: def getIndexMeta(self, url):
230: """returns dom of index.meta document at url"""
231: num_retries = 3
232: dom = None
233: metaUrl = None
234: if url.startswith("http://"):
235: # real URL
236: metaUrl = url
237: else:
238: # online path
239: server=self.digilibBaseUrl+"/servlet/Texter?fn="
240: metaUrl=server+url.replace("/mpiwg/online","")
241: if not metaUrl.endswith("index.meta"):
242: metaUrl += "/index.meta"
243: print metaUrl
244: for cnt in range(num_retries):
245: try:
246: # patch dirk encoding fehler treten dann nicht mehr auf
247: # dom = NonvalidatingReader.parseUri(metaUrl)
248: txt=urllib.urlopen(metaUrl).read()
249: dom = Parse(txt)
250: break
251: except:
252: logger("ERROR documentViewer (getIndexMata)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
253:
254: if dom is None:
255: raise IOError("Unable to read index meta from %s"%(url))
256:
257: return dom
258:
259: def getPresentationInfoXML(self, url):
260: """returns dom of info.xml document at url"""
261: num_retries = 3
262: dom = None
263: metaUrl = None
264: if url.startswith("http://"):
265: # real URL
266: metaUrl = url
267: else:
268: # online path
269: server=self.digilibBaseUrl+"/servlet/Texter?fn="
270: metaUrl=server+url.replace("/mpiwg/online","")
271:
272:
273: for cnt in range(num_retries):
274: try:
275: # patch dirk encoding fehler treten dann nicht mehr auf
276: # dom = NonvalidatingReader.parseUri(metaUrl)
277: txt=urllib.urlopen(metaUrl).read()
278: dom = Parse(txt)
279: break
280: except:
281: logger("ERROR documentViewer (getPresentationInfoXML)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
282:
283: if dom is None:
284: raise IOError("Unable to read infoXMLfrom %s"%(url))
285:
286: return dom
287:
288:
289: def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None):
290: """gets authorization info from the index.meta file at path or given by dom"""
291: logger("documentViewer (getauthinfofromindexmeta)", logging.INFO,"path: %s"%(path))
292:
293: access = None
294:
295: if docinfo is None:
296: docinfo = {}
297:
298: if dom is None:
299: dom = self.getIndexMeta(getParentDir(path))
300:
301: acctype = dom.xpath("//access-conditions/access/@type")
302: if acctype and (len(acctype)>0):
303: access=acctype[0].value
304: if access in ['group', 'institution']:
305: access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
306:
307: docinfo['accessType'] = access
308: return docinfo
309:
310:
    def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None):
        """Read bibliographical info from index.meta (at path, or from dom).

        Stores all raw bib fields under docinfo['bib'] and, when a field
        mapping exists for the bib type, extracts author/title/year/lang.

        @param path: path whose parent dir contains index.meta (unused if dom given)
        @param docinfo: dict to update (a new one is created if None)
        @param dom: already-parsed index.meta document
        """
        logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))

        if docinfo is None:
            docinfo = {}

        if dom is None:
            dom = self.getIndexMeta(getParentDir(path))

        # put in all raw bib fields as dict "bib"
        bib = dom.xpath("//bib/*")
        if bib and len(bib)>0:
            bibinfo = {}
            for e in bib:
                bibinfo[e.localName] = getTextFromNode(e)
            docinfo['bib'] = bibinfo

        # extract some fields (author, title, year) according to their mapping
        # NOTE(review): self.metadata is acquired from the Zope context -- it is
        # not defined in this file; presumably a metadata-mapping tool. Confirm.
        metaData=self.metadata.main.meta.bib
        bibtype=dom.xpath("//bib/@type")
        if bibtype and (len(bibtype)>0):
            bibtype=bibtype[0].value
        else:
            bibtype="generic"

        bibtype=bibtype.replace("-"," ") # wrong types in index meta: "-" instead of " " (not wrong! ROC)
        docinfo['bib_type'] = bibtype
        bibmap=metaData.generateMappingForType(bibtype)
        # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
            docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
            docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
            docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
            logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
            # <lang> is optional; fall back to empty string
            try:
                docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
            except:
                docinfo['lang']=''

        return docinfo
352:
353:
    def getDocinfoFromTextTool(self,url,dom=None,docinfo=None):
        """Parse the texttool tag in index.meta and fill docinfo.

        Determines the archive path, image directory, text URL, viewer URL,
        bibliographic info and access info of the document.

        @param url: url or path of the index.meta file
        @param dom: already-parsed index.meta document (fetched from url if None)
        @param docinfo: dict to update (a new one is created if None)
        @raise IOError: if no archive-path can be determined
        """
        logger("documentViewer (getdocinfofromtexttool)", logging.INFO,"url: %s"%(url))
        if docinfo is None:
            docinfo = {}

        if docinfo.get('lang',None) is None:
            docinfo['lang']='' # default: no language set
        if dom is None:
            dom = self.getIndexMeta(url)

        archivePath = None
        archiveName = None

        archiveNames=dom.xpath("//resource/name")
        if archiveNames and (len(archiveNames)>0):
            archiveName=getTextFromNode(archiveNames[0])
        else:
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING,"resource/name missing in: %s"%(url))

        archivePaths=dom.xpath("//resource/archive-path")
        if archivePaths and (len(archivePaths)>0):
            archivePath=getTextFromNode(archivePaths[0])
            # clean up archive path: make it absolute and ensure it ends
            # with the archive name
            if archivePath[0] != '/':
                archivePath = '/' + archivePath
            if archiveName and (not archivePath.endswith(archiveName)):
                archivePath += "/" + archiveName
        else:
            # try to get archive-path from url
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING,"resource/archive-path missing in: %s"%(url))
            if (not url.startswith('http')):
                archivePath = url.replace('index.meta', '')

        if archivePath is None:
            # we balk without archive-path
            raise IOError("Missing archive-path (for text-tool) in %s"%(url))

        imageDirs=dom.xpath("//texttool/image")
        if imageDirs and (len(imageDirs)>0):
            imageDir=getTextFromNode(imageDirs[0])
        else:
            # we balk with no image tag / not necessary anymore because textmode is now standard
            #raise IOError("No text-tool info in %s"%(url))
            imageDir=""
            docinfo['numPages']=1 # simply set to one for now; thumbnail navigation will of course not work

        docinfo['imagePath'] = "" # no images
        docinfo['imageURL'] = ""

        if imageDir and archivePath:
            # digilib expects image paths relative to /mpiwg/online
            imageDir=os.path.join(archivePath,imageDir)
            imageDir=imageDir.replace("/mpiwg/online",'')
            docinfo=self.getDirinfoFromDigilib(imageDir,docinfo=docinfo)
            docinfo['imagePath'] = imageDir
            docinfo['imageURL'] = self.digilibBaseUrl+"/servlet/Scaler?fn="+imageDir

        viewerUrls=dom.xpath("//texttool/digiliburlprefix")
        if viewerUrls and (len(viewerUrls)>0):
            viewerUrl=getTextFromNode(viewerUrls[0])
            docinfo['viewerURL'] = viewerUrl

        textUrls=dom.xpath("//texttool/text")
        if textUrls and (len(textUrls)>0):
            textUrl=getTextFromNode(textUrls[0])
            if urlparse.urlparse(textUrl)[0]=="": # not a full URL: make it relative to the archive path
                textUrl=os.path.join(archivePath,textUrl)

            docinfo['textURL'] = textUrl

        presentationUrls=dom.xpath("//texttool/presentation")
        docinfo = self.getBibinfoFromIndexMeta(url,docinfo=docinfo,dom=dom) # get info from the bib tag

        if presentationUrls and (len(presentationUrls)>0): # overwrite bib info with presentation info
            # the presentation url is obtained by replacing index.meta in the
            # metadata url with the relative path of the presentation info
            presentationUrl=url.replace('index.meta',getTextFromNode(presentationUrls[0]))
            docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl,docinfo=docinfo,dom=dom)

        docinfo = self.getAuthinfoFromIndexMeta(url,docinfo=docinfo,dom=dom) # get access info
        return docinfo
436:
437:
438: def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None):
439: """gets the bibliographical information from the preseantion entry in texttools
440: """
441: dom=self.getPresentationInfoXML(url)
442: docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
443: docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
444: #docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
445: return docinfo
446:
447: def getDocinfoFromImagePath(self,path,docinfo=None):
448: """path ist the path to the images it assumes that the index.meta file is one level higher."""
449: logger("documentViewer (getdocinfofromimagepath)", logging.INFO,"path: %s"%(path))
450: if docinfo is None:
451: docinfo = {}
452: path=path.replace("/mpiwg/online","")
453: docinfo['imagePath'] = path
454: docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo)
455: imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path
456: docinfo['imageURL'] = imageUrl
457:
458: docinfo = self.getBibinfoFromIndexMeta(path,docinfo=docinfo)
459: docinfo = self.getAuthinfoFromIndexMeta(path,docinfo=docinfo)
460: return docinfo
461:
462:
463: def getDocinfo(self, mode, url):
464: """returns docinfo depending on mode"""
465: logger("documentViewer (getdocinfo)", logging.INFO,"mode: %s, url: %s"%(mode,url))
466: # look for cached docinfo in session
467: if self.REQUEST.SESSION.has_key('docinfo'):
468: docinfo = self.REQUEST.SESSION['docinfo']
469: # check if its still current
470: if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url:
471: logger("documentViewer (getdocinfo)", logging.INFO,"docinfo in session: %s"%docinfo)
472: return docinfo
473: # new docinfo
474: docinfo = {'mode': mode, 'url': url}
475: if mode=="texttool": #index.meta with texttool information
476: docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo)
477: elif mode=="imagepath":
478: docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo)
479: else:
480: logger("documentViewer (getdocinfo)", logging.ERROR,"unknown mode!")
481: raise ValueError("Unknown mode %s"%(mode))
482:
483: logger("documentViewer (getdocinfo)", logging.INFO,"docinfo: %s"%docinfo)
484: self.REQUEST.SESSION['docinfo'] = docinfo
485: return docinfo
486:
487:
488: def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None):
489: """returns pageinfo with the given parameters"""
490: pageinfo = {}
491: current = getInt(current)
492: pageinfo['current'] = current
493: rows = int(rows or self.thumbrows)
494: pageinfo['rows'] = rows
495: cols = int(cols or self.thumbcols)
496: pageinfo['cols'] = cols
497: grpsize = cols * rows
498: pageinfo['groupsize'] = grpsize
499: start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1)))
500: # int(current / grpsize) * grpsize +1))
501: pageinfo['start'] = start
502: pageinfo['end'] = start + grpsize
503: if docinfo is not None:
504: np = int(docinfo['numPages'])
505: pageinfo['end'] = min(pageinfo['end'], np)
506: pageinfo['numgroups'] = int(np / grpsize)
507: if np % grpsize > 0:
508: pageinfo['numgroups'] += 1
509:
510: return pageinfo
511:
    def text(self,mode,url,pn):
        """Give the text of page pn of the document at url.

        NOTE(review): this method looks unfinished -- it calls the undefined
        name `parseUrlTextTool`, shadows the builtin `list`, and the tail of
        the algorithm (including the return of the collected nodes) is
        commented out, so it returns None. It will raise NameError when
        called with mode "texttool". Left byte-identical; needs a rewrite or
        removal once its intent is confirmed.
        """
        if mode=="texttool": # index.meta with texttool information
            (viewerUrl,imagepath,textpath)=parseUrlTextTool(url)

            #print textpath
            try:
                dom = NonvalidatingReader.parseUri(textpath)
            except:
                return None

            list=[]
            # <pb> (page break) elements delimit the pages of the text
            nodes=dom.xpath("//pb")

            # page break that starts page pn
            node=nodes[int(pn)-1]

            p=node

            # climb up to the enclosing <p> of the start page break
            while p.tagName!="p":
                p=p.parentNode

            # page break that ends page pn
            endNode=nodes[int(pn)]

            e=endNode

            # climb up to the enclosing <p> of the end page break
            while e.tagName!="p":
                e=e.parentNode

            next=node.parentNode

            # collect siblings between the start and end paragraph
            while next and (next!=endNode.parentNode):
                list.append(next)
                next=next.nextSibling
            list.append(endNode.parentNode)

            if p==e: # both page breaks inside the same paragraph
                pass
    #        else:
    #            next=p
    #            while next!=e:
    #                print next,e
    #                list.append(next)
    #                next=next.nextSibling
    #
    #        for x in list:
    #            PrettyPrint(x)
    #
    #        return list
    #
565:
566: def findDigilibUrl(self):
567: """try to get the digilib URL from zogilib"""
568: url = self.imageViewerUrl[:-1] + "/getScalerUrl"
569: #print urlparse.urlparse(url)[0]
570: #print urlparse.urljoin(self.absolute_url(),url)
571: logging.info("finddigiliburl: %s"%urlparse.urlparse(url)[0])
572: logging.info("finddigiliburl: %s"%urlparse.urljoin(self.absolute_url(),url))
573:
574: try:
575: if urlparse.urlparse(url)[0]=='': #relative path
576: url=urlparse.urljoin(self.absolute_url()+"/",url)
577:
578: scaler = urlopen(url).read()
579: return scaler.replace("/servlet/Scaler?", "")
580: except:
581: return None
582:
    def changeDocumentViewer(self,imageViewerUrl,textViewerUrl,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=10,authgroups='mpiwg',RESPONSE=None):
        """Change the document viewer configuration (changeDocumentViewerForm).

        NOTE(review): the defaults thumbrows=2, thumbcols=10 look swapped
        relative to __init__ (thumbcols=2, thumbrows=10). The config form
        probably always submits both values explicitly, so this may be
        harmless -- confirm before changing, since defaults are part of the
        interface.
        """
        self.title=title
        self.imageViewerUrl=imageViewerUrl
        self.textViewerUrl=textViewerUrl
        self.digilibBaseUrl = digilibBaseUrl
        self.thumbrows = thumbrows
        self.thumbcols = thumbcols
        # authgroups is a comma-delimited list of authorized group names
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')
594:
595:
596:
597:
598: # security.declareProtected('View management screens','renameImageForm')
599:
def manage_AddDocumentViewerForm(self):
    """Render the ZMI form for adding a documentViewer instance."""
    form = PageTemplateFile('zpt/addDocumentViewer', globals()).__of__(self)
    return form()
604:
def manage_AddDocumentViewer(self,id,imageViewerUrl="",textViewerUrl="",title="",RESPONSE=None):
    """ZMI constructor: create a documentViewer and register it under id."""
    viewer = documentViewer(id, imageViewerUrl, title=title, textViewerUrl=textViewerUrl)
    self._setObject(id, viewer)

    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
612:
613:
614: ##
615: ## DocumentViewerTemplate class
616: ##
class DocumentViewerTemplate(ZopePageTemplate):
    """Page template subclass used for the document viewer's main template."""
    # Zope meta type shown in the add list
    meta_type="DocumentViewer Template"
620:
621:
def manage_addDocumentViewerTemplateForm(self):
    """Render the ZMI form for adding a DocumentViewerTemplate."""
    form = PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
    return form()
626:
def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
                                     REQUEST=None, submit=None):
    """Add a Page Template with optional file content.

    Fix: the default template file was opened via file() and never closed
    (handle leak); it is now closed in a finally clause.
    """

    self._setObject(id, DocumentViewerTemplate(id))
    ob = getattr(self, id)
    # load the packaged default template text
    f = open(os.path.join(package_home(globals()),'zpt/viewer_main.zpt'),'r')
    try:
        txt = f.read()
    finally:
        f.close()
    logging.info("txt %s:"%txt)
    ob.pt_edit(txt,"text/html")
    if title:
        ob.pt_setTitle(title)
    try:
        u = self.DestinationURL()
    except AttributeError:
        # no add-dialog context: fall back to the request URL
        u = REQUEST['URL1']

    u = "%s/%s" % (u, urllib.quote(id))
    REQUEST.RESPONSE.redirect(u+'/manage_main')
    return ''
646:
647:
648:
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>