1:
2:
3: from OFS.Folder import Folder
4: from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
5: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
6: from AccessControl import ClassSecurityInfo
7: from AccessControl import getSecurityManager
8: from Globals import package_home
9:
10: from Ft.Xml.Domlette import NonvalidatingReader
11: from Ft.Xml.Domlette import PrettyPrint, Print
12: from Ft.Xml import EMPTY_NAMESPACE, Parse
13:
14: import Ft.Xml.XPath
15:
16: import os.path
17: import sys
18: import cgi
19: import urllib
20: import logging
21: import math
22:
23: import urlparse
24: from types import *
def logger(txt, method, txt2):
    """Log the concatenation of *txt* and *txt2* at level *method*.

    @param txt: log message prefix (usually "documentViewer (...)").
    @param method: a stdlib logging level constant (logging.INFO, logging.ERROR, ...).
        Fixed: the original ignored this parameter and always logged at INFO,
        so callers passing logging.ERROR were silently downgraded.
    @param txt2: log message body.
    """
    logging.log(method, txt + txt2)
28:
29:
def getInt(number, default=0):
    """Return *number* coerced to int; fall back to int(*default*) on failure.

    Accepts anything int() accepts (ints, floats, numeric strings); None or
    junk input yields the default.  The original used a bare ``except`` which
    would also swallow KeyboardInterrupt etc.; narrowed to the exceptions
    int() actually raises.
    """
    try:
        return int(number)
    except (TypeError, ValueError):
        return int(default)
36:
def getTextFromNode(nodename):
    """Return the concatenated character data of *nodename*'s direct
    text-node children ("" when *nodename* is None)."""
    if nodename is None:
        return ""
    return "".join(child.data
                   for child in nodename.childNodes
                   if child.nodeType == child.TEXT_NODE)
47:
48:
def getParentDir(path):
    """Return *path* with its last '/'-separated component removed
    ("" when there is no '/' in the path)."""
    segments = path.split('/')
    return '/'.join(segments[:-1])
52:
53:
54: import socket
55:
def urlopen(url, timeout=2):
    """urllib.urlopen with a socket timeout.

    Temporarily sets the process-wide socket default timeout to *timeout*
    while opening *url*, then restores the previous default.  Fixed two
    defects of the original: it reset the timeout to a hard-coded 5 seconds
    (clobbering whatever was configured) and skipped the reset entirely when
    urlopen raised.
    """
    saved = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlopen(url)
    finally:
        socket.setdefaulttimeout(saved)
62:
63:
64: ##
65: ## documentViewer class
66: ##
class documentViewer(Folder):
    """document viewer"""
    # Zope folderish product that renders image/text documents via digilib.
    #textViewerUrl="http://127.0.0.1:8080/HFQP/testXSLT/getPage?"

    # Zope meta_type shown in the add-object menu
    meta_type="Document viewer"

    # class-level security declarations (Zope AccessControl)
    security=ClassSecurityInfo()
    # add a "main config" tab to the ZMI management screens
    manage_options=Folder.manage_options+(
        {'label':'main config','action':'changeDocumentViewerForm'},
        )

    # templates and forms
    viewer_main = PageTemplateFile('zpt/viewer_main', globals())
    thumbs_main = PageTemplateFile('zpt/thumbs_main', globals())
    image_main = PageTemplateFile('zpt/image_main', globals())
    head_main = PageTemplateFile('zpt/head_main', globals())
    docuviewer_css = PageTemplateFile('css/docuviewer.css', globals())
    info_xml = PageTemplateFile('zpt/info_xml', globals())

    # RSS variant of the thumbnail overview
    thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())
    security.declareProtected('View management screens','changeDocumentViewerForm')
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())
90:
91: def __init__(self,id,imageViewerUrl,textViewerUrl=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=10,authgroups="mpiwg"):
92: """init document viewer"""
93: self.id=id
94: self.title=title
95: self.imageViewerUrl=imageViewerUrl
96: self.textViewerUrl=textViewerUrl
97:
98: if not digilibBaseUrl:
99: self.digilibBaseUrl = self.findDigilibUrl()
100: else:
101: self.digilibBaseUrl = digilibBaseUrl
102: self.thumbcols = thumbcols
103: self.thumbrows = thumbrows
104: # authgroups is list of authorized groups (delimited by ,)
105: self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
106: # add template folder so we can always use template.something
107: self.manage_addFolder('template')
108:
109:
110: security.declareProtected('View','thumbs_rss')
111: def thumbs_rss(self,mode,url,viewMode="auto",start=None,pn=1):
112: '''
113: view it
114: @param mode: defines how to access the document behind url
115: @param url: url which contains display information
116: @param viewMode: if images display images, if text display text, default is images (text,images or auto)
117:
118: '''
119: logging.info("HHHHHHHHHHHHHH:load the rss")
120: logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
121:
122: if not hasattr(self, 'template'):
123: # create template folder if it doesn't exist
124: self.manage_addFolder('template')
125:
126: if not self.digilibBaseUrl:
127: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
128:
129: docinfo = self.getDocinfo(mode=mode,url=url)
130: pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
131: pt = getattr(self.template, 'thumbs_main_rss')
132:
133: if viewMode=="auto": # automodus gewaehlt
134: if docinfo.get("textURL",'') and self.textViewerUrl: #texturl gesetzt und textViewer konfiguriert
135: viewMode="text"
136: else:
137: viewMode="images"
138:
139: return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
140:
141: security.declareProtected('View','index_html')
142: def index_html(self,mode,url,viewMode="auto",start=None,pn=1,mk=None):
143: '''
144: view it
145: @param mode: defines how to access the document behind url
146: @param url: url which contains display information
147: @param viewMode: if images display images, if text display text, default is images (text,images or auto)
148:
149: '''
150:
151: logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
152:
153: if not hasattr(self, 'template'):
154: # create template folder if it doesn't exist
155: self.manage_addFolder('template')
156:
157: if not self.digilibBaseUrl:
158: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
159:
160: docinfo = self.getDocinfo(mode=mode,url=url)
161: pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
162: pt = getattr(self.template, 'viewer_main')
163:
164: if viewMode=="auto": # automodus gewaehlt
165: if docinfo.get("textURL",'') and self.textViewerUrl: #texturl gesetzt und textViewer konfiguriert
166: viewMode="text"
167: else:
168: viewMode="images"
169:
170: return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,mk=self.generateMarks(mk))
171:
172: def generateMarks(self,mk):
173: ret=""
174: if mk is None:
175: return ""
176:
177: if type(mk) is not ListType:
178: mk=[mk]
179: for m in mk:
180: ret+="mk=%s"%m
181: return ret
182:
183: def getLink(self,param=None,val=None):
184: """link to documentviewer with parameter param set to val"""
185: params=self.REQUEST.form.copy()
186: if param is not None:
187: if val is None:
188: if params.has_key(param):
189: del params[param]
190: else:
191: params[param] = str(val)
192:
193: # quote values and assemble into query string
194: ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
195: url=self.REQUEST['URL1']+"?"+ps
196: return url
197:
198: def getLinkAmp(self,param=None,val=None):
199: """link to documentviewer with parameter param set to val"""
200: params=self.REQUEST.form.copy()
201: if param is not None:
202: if val is None:
203: if params.has_key(param):
204: del params[param]
205: else:
206: params[param] = str(val)
207:
208: # quote values and assemble into query string
209: logging.info("XYXXXXX: %s"%repr(params.items()))
210: ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
211: url=self.REQUEST['URL1']+"?"+ps
212: return url
213: def getInfo_xml(self,url,mode):
214: """returns info about the document as XML"""
215:
216: if not self.digilibBaseUrl:
217: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
218:
219: docinfo = self.getDocinfo(mode=mode,url=url)
220: pt = getattr(self.template, 'info_xml')
221: return pt(docinfo=docinfo)
222:
223:
224: def getStyle(self, idx, selected, style=""):
225: """returns a string with the given style and append 'sel' if path == selected."""
226: #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
227: if idx == selected:
228: return style + 'sel'
229: else:
230: return style
231:
232:
233: def isAccessible(self, docinfo):
234: """returns if access to the resource is granted"""
235: access = docinfo.get('accessType', None)
236: logger("documentViewer (accessOK)", logging.INFO, "access type %s"%access)
237: if access is not None and access == 'free':
238: logger("documentViewer (accessOK)", logging.INFO, "access is free")
239: return True
240: elif access is None or access in self.authgroups:
241: # only local access -- only logged in users
242: user = getSecurityManager().getUser()
243: if user is not None:
244: #print "user: ", user
245: return (user.getUserName() != "Anonymous User")
246: else:
247: return False
248:
249: logger("documentViewer (accessOK)", logging.INFO, "unknown access type %s"%access)
250: return False
251:
252:
253: def getDirinfoFromDigilib(self,path,docinfo=None,cut=0):
254: """gibt param von dlInfo aus"""
255: num_retries = 3
256: if docinfo is None:
257: docinfo = {}
258:
259: for x in range(cut):
260: path=getParentDir(path)
261: infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
262:
263: logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo from %s"%(infoUrl))
264:
265: for cnt in range(num_retries):
266: try:
267: # dom = NonvalidatingReader.parseUri(imageUrl)
268: txt=urllib.urlopen(infoUrl).read()
269: dom = Parse(txt)
270: break
271: except:
272: logger("documentViewer (getdirinfofromdigilib)", logging.ERROR, "error reading %s (try %d)"%(infoUrl,cnt))
273: else:
274: raise IOError("Unable to get dir-info from %s"%(infoUrl))
275:
276: sizes=dom.xpath("//dir/size")
277: logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo:size"%sizes)
278:
279: if sizes:
280: docinfo['numPages'] = int(getTextFromNode(sizes[0]))
281: else:
282: docinfo['numPages'] = 0
283:
284: return docinfo
285:
286:
287: def getIndexMeta(self, url):
288: """returns dom of index.meta document at url"""
289: num_retries = 3
290: dom = None
291: metaUrl = None
292: if url.startswith("http://"):
293: # real URL
294: metaUrl = url
295: else:
296: # online path
297: server=self.digilibBaseUrl+"/servlet/Texter?fn="
298: metaUrl=server+url.replace("/mpiwg/online","")
299: if not metaUrl.endswith("index.meta"):
300: metaUrl += "/index.meta"
301: logging.debug("METAURL: %s"%metaUrl)
302: for cnt in range(num_retries):
303: try:
304: # patch dirk encoding fehler treten dann nicht mehr auf
305: # dom = NonvalidatingReader.parseUri(metaUrl)
306: txt=urllib.urlopen(metaUrl).read()
307: dom = Parse(txt)
308: break
309: except:
310: logger("ERROR documentViewer (getIndexMata)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
311:
312: if dom is None:
313: raise IOError("Unable to read index meta from %s"%(url))
314:
315: return dom
316:
317: def getPresentationInfoXML(self, url):
318: """returns dom of info.xml document at url"""
319: num_retries = 3
320: dom = None
321: metaUrl = None
322: if url.startswith("http://"):
323: # real URL
324: metaUrl = url
325: else:
326: # online path
327: server=self.digilibBaseUrl+"/servlet/Texter?fn="
328: metaUrl=server+url.replace("/mpiwg/online","")
329:
330:
331: for cnt in range(num_retries):
332: try:
333: # patch dirk encoding fehler treten dann nicht mehr auf
334: # dom = NonvalidatingReader.parseUri(metaUrl)
335: txt=urllib.urlopen(metaUrl).read()
336: dom = Parse(txt)
337: break
338: except:
339: logger("ERROR documentViewer (getPresentationInfoXML)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
340:
341: if dom is None:
342: raise IOError("Unable to read infoXMLfrom %s"%(url))
343:
344: return dom
345:
346:
347: def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
348: """gets authorization info from the index.meta file at path or given by dom"""
349: logger("documentViewer (getauthinfofromindexmeta)", logging.INFO,"path: %s"%(path))
350:
351: access = None
352:
353: if docinfo is None:
354: docinfo = {}
355:
356: if dom is None:
357: for x in range(cut+1):
358: path=getParentDir(path)
359: dom = self.getIndexMeta(path)
360:
361: acctype = dom.xpath("//access-conditions/access/@type")
362: if acctype and (len(acctype)>0):
363: access=acctype[0].value
364: if access in ['group', 'institution']:
365: access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
366:
367: docinfo['accessType'] = access
368: return docinfo
369:
370:
    def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
        """gets bibliographical info from the index.meta file at path or given by dom

        Fills docinfo with: 'bib' (raw field dict), 'bib_type', and --
        depending on the type mapping -- 'author', 'title', 'year', 'lang'.
        @param path: online path to the document (used when dom is None)
        @param docinfo: dict to update (a new one is created if None)
        @param dom: already-parsed index.meta DOM
        @param cut: trailing path components to strip before fetching
        """
        logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))

        if docinfo is None:
            docinfo = {}

        if dom is None:
            # strip cut+1 components to reach the dir holding index.meta
            for x in range(cut+1):
                path=getParentDir(path)
            dom = self.getIndexMeta(path)

        # put in all raw bib fields as dict "bib"
        bib = dom.xpath("//bib/*")
        if bib and len(bib)>0:
            bibinfo = {}
            for e in bib:
                bibinfo[e.localName] = getTextFromNode(e)
            docinfo['bib'] = bibinfo

        # extract some fields (author, title, year) according to their mapping
        # NOTE(review): self.metadata is acquired from the Zope context --
        # presumably a metadata-mapping tool; confirm its interface before
        # changing this.
        metaData=self.metadata.main.meta.bib
        bibtype=dom.xpath("//bib/@type")
        if bibtype and (len(bibtype)>0):
            bibtype=bibtype[0].value
        else:
            bibtype="generic"

        bibtype=bibtype.replace("-"," ") # wrong typesiin index meta "-" instead of " " (not wrong! ROC)
        docinfo['bib_type'] = bibtype
        bibmap=metaData.generateMappingForType(bibtype)
        # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
            # each field is optional; missing nodes are silently skipped
            try:
                docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
            except: pass
            try:
                docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
            except: pass
            try:
                docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
            except: pass
            logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
        # language is read independently of the mapping; default ''
        try:
            docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
        except:
            docinfo['lang']=''

        return docinfo
420:
421:
    def getDocinfoFromTextTool(self,url,dom=None,docinfo=None):
        """parse texttool tag in index meta

        Collects image directory, text URL, viewer URL, bibliographic and
        access information for the resource described by the index.meta at
        *url*.  Presentation info (if present) overrides the bib info.
        @param url: location of the index.meta (http URL or online path)
        @param dom: already-parsed index.meta DOM (fetched from url if None)
        @param docinfo: dict to update (a new one is created if None)
        @raise IOError: if no archive-path can be determined
        """
        logger("documentViewer (getdocinfofromtexttool)", logging.INFO,"url: %s"%(url))
        if docinfo is None:
            docinfo = {}

        if docinfo.get('lang',None) is None:
            docinfo['lang']='' # default: no language set
        if dom is None:
            dom = self.getIndexMeta(url)

        archivePath = None
        archiveName = None

        archiveNames=dom.xpath("//resource/name")
        if archiveNames and (len(archiveNames)>0):
            archiveName=getTextFromNode(archiveNames[0])
        else:
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING,"resource/name missing in: %s"%(url))

        archivePaths=dom.xpath("//resource/archive-path")
        if archivePaths and (len(archivePaths)>0):
            archivePath=getTextFromNode(archivePaths[0])
            # clean up archive path
            # NOTE(review): archivePath[0] raises IndexError when the
            # element is present but empty -- confirm index.meta always
            # carries a non-empty archive-path
            if archivePath[0] != '/':
                archivePath = '/' + archivePath
            if archiveName and (not archivePath.endswith(archiveName)):
                archivePath += "/" + archiveName
        else:
            # try to get archive-path from url
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING,"resource/archive-path missing in: %s"%(url))
            if (not url.startswith('http')):
                archivePath = url.replace('index.meta', '')

        if archivePath is None:
            # we balk without archive-path
            raise IOError("Missing archive-path (for text-tool) in %s"%(url))

        imageDirs=dom.xpath("//texttool/image")
        if imageDirs and (len(imageDirs)>0):
            imageDir=getTextFromNode(imageDirs[0])
        else:
            # we balk with no image tag / not necessary anymore because textmode is now standard
            #raise IOError("No text-tool info in %s"%(url))
            imageDir=""
            docinfo['numPages']=1 # simply set to one for now; thumbnail navigation is of course not possible

        docinfo['imagePath'] = "" # no images (may be overwritten below)
        docinfo['imageURL'] = ""

        if imageDir and archivePath:
            #print "image: ", imageDir, " archivepath: ", archivePath
            imageDir=os.path.join(archivePath,imageDir)
            imageDir=imageDir.replace("/mpiwg/online",'')
            docinfo=self.getDirinfoFromDigilib(imageDir,docinfo=docinfo)
            docinfo['imagePath'] = imageDir
            docinfo['imageURL'] = self.digilibBaseUrl+"/servlet/Scaler?fn="+imageDir

        viewerUrls=dom.xpath("//texttool/digiliburlprefix")
        if viewerUrls and (len(viewerUrls)>0):
            viewerUrl=getTextFromNode(viewerUrls[0])
            docinfo['viewerURL'] = viewerUrl

        textUrls=dom.xpath("//texttool/text")
        if textUrls and (len(textUrls)>0):
            textUrl=getTextFromNode(textUrls[0])
            if urlparse.urlparse(textUrl)[0]=="": # not an absolute URL
                textUrl=os.path.join(archivePath,textUrl)
            # fix URLs starting with /mpiwg/online
            if textUrl.startswith("/mpiwg/online"):
                textUrl = textUrl.replace("/mpiwg/online",'',1)

            docinfo['textURL'] = textUrl

        presentationUrls=dom.xpath("//texttool/presentation")
        docinfo = self.getBibinfoFromIndexMeta(url,docinfo=docinfo,dom=dom) # get info from bib tag

        if presentationUrls and (len(presentationUrls)>0): # override these with presentation information
            # the presentation url is derived by replacing index.meta in the
            # metadata url with the relative path to the presentation info
            presentationUrl=url.replace('index.meta',getTextFromNode(presentationUrls[0]))
            docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl,docinfo=docinfo,dom=dom)

        docinfo = self.getAuthinfoFromIndexMeta(url,docinfo=docinfo,dom=dom) # get access info
        return docinfo
507:
508:
509: def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None):
510: """gets the bibliographical information from the preseantion entry in texttools
511: """
512: dom=self.getPresentationInfoXML(url)
513: try:
514: docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
515: except:
516: pass
517: try:
518: docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
519: except:
520: pass
521: try:
522: docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
523: except:
524: pass
525: return docinfo
526:
527: def getDocinfoFromImagePath(self,path,docinfo=None,cut=0):
528: """path ist the path to the images it assumes that the index.meta file is one level higher."""
529: logger("documentViewer (getdocinfofromimagepath)", logging.INFO,"path: %s"%(path))
530: if docinfo is None:
531: docinfo = {}
532: path=path.replace("/mpiwg/online","")
533: docinfo['imagePath'] = path
534: docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo,cut=cut)
535: imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path
536: docinfo['imageURL'] = imageUrl
537:
538: docinfo = self.getBibinfoFromIndexMeta(path,docinfo=docinfo,cut=cut)
539: docinfo = self.getAuthinfoFromIndexMeta(path,docinfo=docinfo,cut=cut)
540: return docinfo
541:
542:
543: def getDocinfo(self, mode, url):
544: """returns docinfo depending on mode"""
545: logger("documentViewer (getdocinfo)", logging.INFO,"mode: %s, url: %s"%(mode,url))
546: # look for cached docinfo in session
547: if self.REQUEST.SESSION.has_key('docinfo'):
548: docinfo = self.REQUEST.SESSION['docinfo']
549: # check if its still current
550: if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url:
551: logger("documentViewer (getdocinfo)", logging.INFO,"docinfo in session: %s"%docinfo)
552: return docinfo
553: # new docinfo
554: docinfo = {'mode': mode, 'url': url}
555: if mode=="texttool": #index.meta with texttool information
556: docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo)
557: elif mode=="imagepath":
558: docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo)
559: elif mode=="filepath":
560: docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1)
561: else:
562: logger("documentViewer (getdocinfo)", logging.ERROR,"unknown mode!")
563: raise ValueError("Unknown mode %s"%(mode))
564:
565: logger("documentViewer (getdocinfo)", logging.INFO,"docinfo: %s"%docinfo)
566: self.REQUEST.SESSION['docinfo'] = docinfo
567: return docinfo
568:
569:
570: def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None):
571: """returns pageinfo with the given parameters"""
572: pageinfo = {}
573: current = getInt(current)
574: pageinfo['current'] = current
575: rows = int(rows or self.thumbrows)
576: pageinfo['rows'] = rows
577: cols = int(cols or self.thumbcols)
578: pageinfo['cols'] = cols
579: grpsize = cols * rows
580: pageinfo['groupsize'] = grpsize
581: start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1)))
582: # int(current / grpsize) * grpsize +1))
583: pageinfo['start'] = start
584: pageinfo['end'] = start + grpsize
585: if docinfo is not None:
586: np = int(docinfo['numPages'])
587: pageinfo['end'] = min(pageinfo['end'], np)
588: pageinfo['numgroups'] = int(np / grpsize)
589: if np % grpsize > 0:
590: pageinfo['numgroups'] += 1
591: logging.debug("getPageInfo: %s"%repr(pageinfo))
592: return pageinfo
593:
    def text(self,mode,url,pn):
        """give text

        Returns the text of page *pn* of the document behind *url*.
        NOTE(review): this method looks unfinished -- the paragraph
        collection at the end is commented out and the normal path falls
        through, implicitly returning None.  Confirm whether it is still
        used before touching it.
        """
        if mode=="texttool": #index.meta with texttool information
            # NOTE(review): parseUrlTextTool is not defined in this module;
            # taking this branch would raise NameError -- confirm where it
            # is supposed to come from.
            (viewerUrl,imagepath,textpath)=parseUrlTextTool(url)

        #print textpath
        try:
            dom = NonvalidatingReader.parseUri(textpath)
        except:
            return None

        # NOTE(review): `list` and `next` shadow the builtins below
        list=[]
        nodes=dom.xpath("//pb")

        # page-break node for the requested page (1-based)
        node=nodes[int(pn)-1]

        p=node

        # climb up to the enclosing paragraph of the start page break
        while p.tagName!="p":
            p=p.parentNode

        # page break that ends the requested page
        endNode=nodes[int(pn)]

        e=endNode

        # climb up to the enclosing paragraph of the end page break
        while e.tagName!="p":
            e=e.parentNode

        next=node.parentNode

        # collect sibling nodes between the two page breaks
        while next and (next!=endNode.parentNode):
            list.append(next)
            next=next.nextSibling
        list.append(endNode.parentNode)

        if p==e:# both within the same paragraph
            pass
        # else:
        # next=p
        # while next!=e:
        # print next,e
        # list.append(next)
        # next=next.nextSibling
        #
        # for x in list:
        # PrettyPrint(x)
        #
        # return list
        #
646: #
647:
648: def findDigilibUrl(self):
649: """try to get the digilib URL from zogilib"""
650: url = self.imageViewerUrl[:-1] + "/getScalerUrl"
651: #print urlparse.urlparse(url)[0]
652: #print urlparse.urljoin(self.absolute_url(),url)
653: logging.info("finddigiliburl: %s"%urlparse.urlparse(url)[0])
654: logging.info("finddigiliburl: %s"%urlparse.urljoin(self.absolute_url(),url))
655:
656: try:
657: if urlparse.urlparse(url)[0]=='': #relative path
658: url=urlparse.urljoin(self.absolute_url()+"/",url)
659:
660: scaler = urlopen(url).read()
661: return scaler.replace("/servlet/Scaler?", "")
662: except:
663: return None
664:
665: def changeDocumentViewer(self,imageViewerUrl,textViewerUrl,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=10,authgroups='mpiwg',RESPONSE=None):
666: """init document viewer"""
667: self.title=title
668: self.imageViewerUrl=imageViewerUrl
669: self.textViewerUrl=textViewerUrl
670: self.digilibBaseUrl = digilibBaseUrl
671: self.thumbrows = thumbrows
672: self.thumbcols = thumbcols
673: self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
674: if RESPONSE is not None:
675: RESPONSE.redirect('manage_main')
676:
677:
678:
679:
680: # security.declareProtected('View management screens','renameImageForm')
681:
def manage_AddDocumentViewerForm(self):
    """add the viewer form"""
    # Zope constructor form: bind the template to the container via __of__
    form = PageTemplateFile('zpt/addDocumentViewer', globals()).__of__(self)
    return form()
686:
def manage_AddDocumentViewer(self,id,imageViewerUrl="",textViewerUrl="",title="",RESPONSE=None):
    """add the viewer"""
    viewer = documentViewer(id, imageViewerUrl, title=title, textViewerUrl=textViewerUrl)
    self._setObject(id, viewer)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
694:
695:
696: ##
697: ## DocumentViewerTemplate class
698: ##
class DocumentViewerTemplate(ZopePageTemplate):
    """Template for document viewer"""
    # Zope meta_type shown in the add-object menu
    meta_type="DocumentViewer Template"
702:
703:
def manage_addDocumentViewerTemplateForm(self):
    """Form for adding"""
    # Zope constructor form: bind the template to the container via __of__
    form = PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
    return form()
708:
def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
                                     REQUEST=None, submit=None):
    """Add a Page Template with optional file content.

    Fixed two defects: the default-template file handle opened with the
    py2-only file() builtin was never closed (resource leak), and REQUEST
    (default None) was dereferenced unconditionally for the final redirect.
    """
    self._setObject(id, DocumentViewerTemplate(id))
    ob = getattr(self, id)
    # load the default template body and make sure the handle is closed
    f = open(os.path.join(package_home(globals()), 'zpt/viewer_main.zpt'), 'r')
    try:
        txt = f.read()
    finally:
        f.close()
    logging.info("txt %s:"%txt)
    ob.pt_edit(txt, "text/html")
    if title:
        ob.pt_setTitle(title)
    if REQUEST is not None:
        try:
            u = self.DestinationURL()
        except AttributeError:
            u = REQUEST['URL1']
        u = "%s/%s" % (u, urllib.quote(id))
        REQUEST.RESPONSE.redirect(u + '/manage_main')
    return ''
728:
729:
730:
# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>  (CVSweb footer residue; commented out -- the bare line is a Python syntax error)