1:
2: from OFS.Folder import Folder
3: from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
4: from Products.PageTemplates.PageTemplateFile import PageTemplateFile
5: from Products.PythonScripts.standard import url_quote
6: from AccessControl import ClassSecurityInfo
7: from AccessControl import getSecurityManager
8: from Globals import package_home
9:
10: from Ft.Xml.Domlette import NonvalidatingReader
11: from Ft.Xml.Domlette import PrettyPrint, Print
12: from Ft.Xml import EMPTY_NAMESPACE, Parse
13:
14: from xml.dom.minidom import parse, parseString
15:
16:
17:
18: import Ft.Xml.XPath
19: import cStringIO
20: import xmlrpclib
21: import os.path
22: import sys
23: import cgi
24: import urllib
25: import logging
26: import math
27:
28: import urlparse
29: from types import *
30:
def logger(txt, method, txt2):
    """Log the concatenation of txt and txt2 at the given logging level.

    @param txt: message prefix (usually the calling context)
    @param method: logging level constant (e.g. logging.INFO, logging.ERROR)
    @param txt2: message body
    """
    # honor the requested level -- the old code always logged at INFO,
    # silently downgrading ERROR/WARNING messages from call sites
    logging.log(method, txt + txt2)
34:
35:
def getInt(number, default=0):
    """Return number converted to int, falling back to default on failure.

    @param number: value to convert (string, number, None, ...)
    @param default: fallback used when conversion fails (default 0)
    @return: int(number), or int(default) if number is not convertible
    """
    try:
        return int(number)
    except (TypeError, ValueError):
        # non-numeric input (None, "abc", "1.5", ...) -> use the default;
        # narrowed from a bare except so real errors are not swallowed
        return int(default)
42:
def getTextFromNode(nodename):
    """Return the concatenated character data of a node's direct text children.

    Returns the empty string when nodename is None.
    """
    if nodename is None:
        return ""
    return "".join(child.data for child in nodename.childNodes
                   if child.nodeType == child.TEXT_NODE)
53:
def serializeNode(node, encoding='utf-8'):
    """Serialize node into an XML string using the given encoding."""
    out = cStringIO.StringIO()
    try:
        Print(node, stream=out, encoding=encoding)
        return out.getvalue()
    finally:
        # release the buffer even if Print raises
        out.close()
61:
62:
def getParentDir(path):
    """Return path with its last '/'-separated component removed."""
    # everything before the last '/' ('' when there is no slash)
    return path.rpartition('/')[0]
66:
67:
68: import socket
69:
def urlopen(url, timeout=2):
    """urllib.urlopen with a socket timeout.

    Temporarily sets the global socket default timeout for the call and
    restores the previous value afterwards. (The old code unconditionally
    reset it to a hard-coded 5 seconds, clobbering any other configuration.)

    @param url: URL to open
    @param timeout: timeout in seconds (default 2)
    @return: the file-like object returned by urllib.urlopen
    """
    oldtimeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib.urlopen(url)
    finally:
        # always restore the previous global timeout
        socket.setdefaulttimeout(oldtimeout)
76:
77:
78: ##
79: ## documentViewer class
80: ##
class documentViewer(Folder):
    """Zope folder object that displays digilib-based documents
    (page images, fulltext and tables of contents) using the page
    templates referenced below.
    """
    #textViewerUrl="http://127.0.0.1:8080/HFQP/testXSLT/getPage?"

    meta_type="Document viewer"

    security=ClassSecurityInfo()
    # add a "main config" tab to the standard Zope management screens
    manage_options=Folder.manage_options+(
        {'label':'main config','action':'changeDocumentViewerForm'},
        )

    # templates and forms
    viewer_main = PageTemplateFile('zpt/viewer_main', globals())
    toc_thumbs = PageTemplateFile('zpt/toc_thumbs', globals())
    toc_text = PageTemplateFile('zpt/toc_text', globals())
    toc_figures = PageTemplateFile('zpt/toc_figures', globals())
    page_main_images = PageTemplateFile('zpt/page_main_images', globals())
    page_main_text = PageTemplateFile('zpt/page_main_text', globals())
    page_main_text_dict = PageTemplateFile('zpt/page_main_text_dict', globals())
    page_main_xml = PageTemplateFile('zpt/page_main_xml', globals())
    head_main = PageTemplateFile('zpt/head_main', globals())
    docuviewer_css = PageTemplateFile('css/docuviewer.css', globals())
    info_xml = PageTemplateFile('zpt/info_xml', globals())

    thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())
    # the config form is only visible on the management screens
    security.declareProtected('View management screens','changeDocumentViewerForm')
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())
108:
109:
    def __init__(self,id,imageScalerUrl=None,textServerName=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=5,authgroups="mpiwg"):
        """init document viewer

        @param id: Zope object id
        @param imageScalerUrl: digilib Scaler URL handed to the zogilib instance
        @param textServerName: URL of the fulltext server for the XMLRpc client
        @param title: object title
        @param digilibBaseUrl: NOTE(review): accepted but never stored here;
            the attribute is set lazily in index_html -- confirm this is intended
        @param thumbcols: number of columns in the thumbnail overview
        @param thumbrows: number of rows in the thumbnail overview
        @param authgroups: comma separated list of groups allowed access
        """
        self.id=id
        self.title=title
        self.thumbcols = thumbcols
        self.thumbrows = thumbrows
        # authgroups is list of authorized groups (delimited by ,)
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
        # create template folder so we can always use template.something

        templateFolder = Folder('template')
        #self['template'] = templateFolder # Zope-2.12 style
        self._setObject('template',templateFolder) # old style
        try:
            # client object for the fulltext server
            from Products.XMLRpcTools.XMLRpcTools import XMLRpcServerProxy
            xmlRpcClient = XMLRpcServerProxy(id='fulltextclient', serverUrl=textServerName, use_xmlrpc=False)
            #templateFolder['fulltextclient'] = xmlRpcClient
            templateFolder._setObject('fulltextclient',xmlRpcClient)
        except Exception, e:
            # best effort -- the viewer still works without fulltext
            logging.error("Unable to create XMLRpcTools for fulltextclient: "+str(e))
        try:
            # zogilib provides image display and the digilib base URL
            from Products.zogiLib.zogiLib import zogiLib
            zogilib = zogiLib(id="zogilib", title="zogilib for docuviewer", dlServerURL=imageScalerUrl, layout="book")
            #templateFolder['zogilib'] = zogilib
            templateFolder._setObject('zogilib',zogilib)
        except Exception, e:
            # best effort -- image display will be unavailable
            logging.error("Unable to create zogiLib for zogilib: "+str(e))
137:
138:
139: security.declareProtected('View','thumbs_rss')
140: def thumbs_rss(self,mode,url,viewMode="auto",start=None,pn=1):
141: '''
142: view it
143: @param mode: defines how to access the document behind url
144: @param url: url which contains display information
145: @param viewMode: if images display images, if text display text, default is images (text,images or auto)
146:
147: '''
148: logging.debug("HHHHHHHHHHHHHH:load the rss")
149: logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
150:
151: if not hasattr(self, 'template'):
152: # create template folder if it doesn't exist
153: self.manage_addFolder('template')
154:
155: if not self.digilibBaseUrl:
156: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
157:
158: docinfo = self.getDocinfo(mode=mode,url=url)
159: pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
160: pt = getattr(self.template, 'thumbs_main_rss')
161:
162: if viewMode=="auto": # automodus gewaehlt
163: if docinfo.get("textURL",'') and self.textViewerUrl: #texturl gesetzt und textViewer konfiguriert
164: viewMode="text"
165: else:
166: viewMode="images"
167:
168: return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
169:
    security.declareProtected('View','index_html')
    def index_html(self,url,mode="texttool",viewMode="auto",tocMode="thumbs",start=None,pn=1,mk=None, query=None, querySearch=None):
        '''
        view it
        @param mode: defines how to access the document behind url
        @param url: url which contains display information
        @param viewMode: if images display images, if text display text, default is auto (text,images or auto)
        @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, none)
        @param querySearch: type of different search modes (fulltext, fulltextMorph, xpath, xquery, ftIndex, ftIndexMorph, fulltextMorphLemma)
        '''

        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))

        if not hasattr(self, 'template'):
            # this won't work
            logging.error("template folder missing!")
            return "ERROR: template folder missing!"

        if not getattr(self, 'digilibBaseUrl', None):
            # determine the digilib URL lazily on first use
            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"

        docinfo = self.getDocinfo(mode=mode,url=url)


        if tocMode != "thumbs":
            # get table of contents
            docinfo = self.getToc(mode=tocMode, docinfo=docinfo)

        if viewMode=="auto": # automatic mode selection
            if docinfo.get("textURL",''): # text URL set -> show text with dictionary
                viewMode="text_dict"
            else:
                viewMode="images"

        pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo,viewMode=viewMode,tocMode=tocMode)

        pt = getattr(self.template, 'viewer_main')
        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,mk=self.generateMarks(mk))
208:
209: def generateMarks(self,mk):
210: ret=""
211: if mk is None:
212: return ""
213: if type(mk) is not ListType:
214: mk=[mk]
215: for m in mk:
216: ret+="mk=%s"%m
217: return ret
218:
219:
220: def findDigilibUrl(self):
221: """try to get the digilib URL from zogilib"""
222: url = self.template.zogilib.getDLBaseUrl()
223: return url
224:
225: def getDocumentViewerURL(self):
226: """returns the URL of this instance"""
227: return self.absolute_url()
228:
229: def getStyle(self, idx, selected, style=""):
230: """returns a string with the given style and append 'sel' if path == selected."""
231: #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
232: if idx == selected:
233: return style + 'sel'
234: else:
235: return style
236:
237: def getLink(self,param=None,val=None):
238: """link to documentviewer with parameter param set to val"""
239: params=self.REQUEST.form.copy()
240: if param is not None:
241: if val is None:
242: if params.has_key(param):
243: del params[param]
244: else:
245: params[param] = str(val)
246:
247: if params.get("mode", None) == "filepath": #wenn beim erst Aufruf filepath gesetzt wurde aendere das nun zu imagepath
248: params["mode"] = "imagepath"
249: params["url"] = getParentDir(params["url"])
250:
251: # quote values and assemble into query string
252: ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
253: url=self.REQUEST['URL1']+"?"+ps
254: return url
255:
256: def getLinkAmp(self,param=None,val=None):
257: """link to documentviewer with parameter param set to val"""
258: params=self.REQUEST.form.copy()
259: if param is not None:
260: if val is None:
261: if params.has_key(param):
262: del params[param]
263: else:
264: params[param] = str(val)
265:
266: # quote values and assemble into query string
267: logging.info("XYXXXXX: %s"%repr(params.items()))
268: ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
269: url=self.REQUEST['URL1']+"?"+ps
270: return url
271:
272: def getInfo_xml(self,url,mode):
273: """returns info about the document as XML"""
274:
275: if not self.digilibBaseUrl:
276: self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
277:
278: docinfo = self.getDocinfo(mode=mode,url=url)
279: pt = getattr(self.template, 'info_xml')
280: return pt(docinfo=docinfo)
281:
282:
283: def isAccessible(self, docinfo):
284: """returns if access to the resource is granted"""
285: access = docinfo.get('accessType', None)
286: logger("documentViewer (accessOK)", logging.INFO, "access type %s"%access)
287: if access is not None and access == 'free':
288: logger("documentViewer (accessOK)", logging.INFO, "access is free")
289: return True
290: elif access is None or access in self.authgroups:
291: # only local access -- only logged in users
292: user = getSecurityManager().getUser()
293: if user is not None:
294: #print "user: ", user
295: return (user.getUserName() != "Anonymous User")
296: else:
297: return False
298:
299: logger("documentViewer (accessOK)", logging.INFO, "unknown access type %s"%access)
300: return False
301:
302:
303: def getDirinfoFromDigilib(self,path,docinfo=None,cut=0):
304: """gibt param von dlInfo aus"""
305: num_retries = 3
306: if docinfo is None:
307: docinfo = {}
308:
309: for x in range(cut):
310:
311: path=getParentDir(path)
312:
313: infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
314:
315: logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo from %s"%(infoUrl))
316:
317: for cnt in range(num_retries):
318: try:
319: # dom = NonvalidatingReader.parseUri(imageUrl)
320: txt=urllib.urlopen(infoUrl).read()
321: dom = Parse(txt)
322: break
323: except:
324: logger("documentViewer (getdirinfofromdigilib)", logging.ERROR, "error reading %s (try %d)"%(infoUrl,cnt))
325: else:
326: raise IOError("Unable to get dir-info from %s"%(infoUrl))
327:
328: sizes=dom.xpath("//dir/size")
329: logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo:size"%sizes)
330:
331: if sizes:
332: docinfo['numPages'] = int(getTextFromNode(sizes[0]))
333: else:
334: docinfo['numPages'] = 0
335:
336: # TODO: produce and keep list of image names and numbers
337:
338: return docinfo
339:
340:
341: def getIndexMeta(self, url):
342: """returns dom of index.meta document at url"""
343: num_retries = 3
344: dom = None
345: metaUrl = None
346: if url.startswith("http://"):
347: # real URL
348: metaUrl = url
349: else:
350: # online path
351: server=self.digilibBaseUrl+"/servlet/Texter?fn="
352: metaUrl=server+url.replace("/mpiwg/online","")
353: if not metaUrl.endswith("index.meta"):
354: metaUrl += "/index.meta"
355: logging.debug("METAURL: %s"%metaUrl)
356: for cnt in range(num_retries):
357: try:
358: # patch dirk encoding fehler treten dann nicht mehr auf
359: # dom = NonvalidatingReader.parseUri(metaUrl)
360: txt=urllib.urlopen(metaUrl).read()
361: dom = Parse(txt)
362: break
363: except:
364: logger("ERROR documentViewer (getIndexMeta)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
365:
366: if dom is None:
367: raise IOError("Unable to read index meta from %s"%(url))
368:
369: return dom
370:
371: def getPresentationInfoXML(self, url):
372: """returns dom of info.xml document at url"""
373: num_retries = 3
374: dom = None
375: metaUrl = None
376: if url.startswith("http://"):
377: # real URL
378: metaUrl = url
379: else:
380: # online path
381: server=self.digilibBaseUrl+"/servlet/Texter?fn="
382: metaUrl=server+url.replace("/mpiwg/online","")
383:
384: for cnt in range(num_retries):
385: try:
386: # patch dirk encoding fehler treten dann nicht mehr auf
387: # dom = NonvalidatingReader.parseUri(metaUrl)
388: txt=urllib.urlopen(metaUrl).read()
389: dom = Parse(txt)
390: break
391: except:
392: logger("ERROR documentViewer (getPresentationInfoXML)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
393:
394: if dom is None:
395: raise IOError("Unable to read infoXMLfrom %s"%(url))
396:
397: return dom
398:
399:
400: def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
401: """gets authorization info from the index.meta file at path or given by dom"""
402: logger("documentViewer (getauthinfofromindexmeta)", logging.INFO,"path: %s"%(path))
403:
404: access = None
405:
406: if docinfo is None:
407: docinfo = {}
408:
409: if dom is None:
410: for x in range(cut):
411: path=getParentDir(path)
412: dom = self.getIndexMeta(path)
413:
414: acctype = dom.xpath("//access-conditions/access/@type")
415: if acctype and (len(acctype)>0):
416: access=acctype[0].value
417: if access in ['group', 'institution']:
418: access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
419:
420: docinfo['accessType'] = access
421: return docinfo
422:
423:
    def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
        """gets bibliographical info from the index.meta file at path or given by dom

        @param path: path of the index.meta file
        @param docinfo: dict to update (a new one is created if None)
        @param dom: already parsed index.meta dom (fetched from path if None)
        @param cut: number of trailing path elements to cut off before fetching
        """
        logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))

        if docinfo is None:
            docinfo = {}

        if dom is None:
            # shorten path and fetch the index.meta document
            for x in range(cut):
                path=getParentDir(path)
            dom = self.getIndexMeta(path)

        logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
        # put in all raw bib fields as dict "bib"
        bib = dom.xpath("//bib/*")
        if bib and len(bib)>0:
            bibinfo = {}
            for e in bib:
                bibinfo[e.localName] = getTextFromNode(e)
            docinfo['bib'] = bibinfo

        # extract some fields (author, title, year) according to their mapping
        metaData=self.metadata.main.meta.bib
        bibtype=dom.xpath("//bib/@type")
        if bibtype and (len(bibtype)>0):
            bibtype=bibtype[0].value
        else:
            bibtype="generic"

        bibtype=bibtype.replace("-"," ") # index.meta uses "-" where the mapping uses " " (not wrong! ROC)
        docinfo['bib_type'] = bibtype
        bibmap=metaData.generateMappingForType(bibtype)
        # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
            # each field is optional in the document -- ignore missing ones
            try:
                docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
            except: pass
            try:
                docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
            except: pass
            try:
                docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
            except: pass
            logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
        # language is read independently of the mapping
        try:
            docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
        except:
            docinfo['lang']=''

        return docinfo
474:
475:
    def getDocinfoFromTextTool(self, url, dom=None, docinfo=None):
        """parse texttool tag in index meta

        @param url: url of the index.meta document
        @param dom: already parsed index.meta dom (fetched from url if None)
        @param docinfo: dict to update (a new one is created if None)
        @raise IOError: when no archive-path can be determined
        """
        logger("documentViewer (getdocinfofromtexttool)", logging.INFO, "url: %s" % (url))
        if docinfo is None:
            docinfo = {}
        if docinfo.get('lang', None) is None:
            docinfo['lang'] = '' # default: no language set
        if dom is None:
            dom = self.getIndexMeta(url)

        archivePath = None
        archiveName = None

        archiveNames = dom.xpath("//resource/name")
        if archiveNames and (len(archiveNames) > 0):
            archiveName = getTextFromNode(archiveNames[0])
        else:
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/name missing in: %s" % (url))

        archivePaths = dom.xpath("//resource/archive-path")
        if archivePaths and (len(archivePaths) > 0):
            archivePath = getTextFromNode(archivePaths[0])
            # clean up archive path
            if archivePath[0] != '/':
                archivePath = '/' + archivePath
            if archiveName and (not archivePath.endswith(archiveName)):
                archivePath += "/" + archiveName
        else:
            # try to get archive-path from url
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/archive-path missing in: %s" % (url))
            if (not url.startswith('http')):
                archivePath = url.replace('index.meta', '')

        if archivePath is None:
            # we balk without archive-path
            raise IOError("Missing archive-path (for text-tool) in %s" % (url))

        imageDirs = dom.xpath("//texttool/image")
        if imageDirs and (len(imageDirs) > 0):
            imageDir = getTextFromNode(imageDirs[0])

        else:
            # we balk with no image tag / not necessary anymore because textmode is now standard
            #raise IOError("No text-tool info in %s"%(url))
            imageDir = ""
            #xquery="//pb"
            docinfo['imagePath'] = "" # no images
            docinfo['imageURL'] = ""

        if imageDir and archivePath:
            #print "image: ", imageDir, " archivepath: ", archivePath
            imageDir = os.path.join(archivePath, imageDir)
            imageDir = imageDir.replace("/mpiwg/online", '')
            docinfo = self.getDirinfoFromDigilib(imageDir, docinfo=docinfo)
            docinfo['imagePath'] = imageDir

            docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir

        viewerUrls = dom.xpath("//texttool/digiliburlprefix")
        if viewerUrls and (len(viewerUrls) > 0):
            viewerUrl = getTextFromNode(viewerUrls[0])
            docinfo['viewerURL'] = viewerUrl

        textUrls = dom.xpath("//texttool/text")
        if textUrls and (len(textUrls) > 0):
            textUrl = getTextFromNode(textUrls[0])
            if urlparse.urlparse(textUrl)[0] == "": # not a full URL -> make relative to archive
                textUrl = os.path.join(archivePath, textUrl)
            # fix URLs starting with /mpiwg/online
            if textUrl.startswith("/mpiwg/online"):
                textUrl = textUrl.replace("/mpiwg/online", '', 1)

            docinfo['textURL'] = textUrl

        textUrls = dom.xpath("//texttool/text-url-path")
        if textUrls and (len(textUrls) > 0):
            textUrl = getTextFromNode(textUrls[0])
            docinfo['textURLPath'] = textUrl
            if not docinfo['imagePath']:
                # text-only, no page images
                docinfo = self.getNumPages(docinfo) # for now just the page count; thumbnail navigation will not work

        presentationUrls = dom.xpath("//texttool/presentation")
        docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get info from the bib tag

        if presentationUrls and (len(presentationUrls) > 0): # overwrite with presentation info
            # the presentation url is derived by replacing index.meta in the
            # metadata url with the relative path to the presentation info
            presentationPath = getTextFromNode(presentationUrls[0])
            if url.endswith("index.meta"):
                presentationUrl = url.replace('index.meta', presentationPath)
            else:
                presentationUrl = url + "/" + presentationPath

            docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl, docinfo=docinfo, dom=dom)

        docinfo = self.getAuthinfoFromIndexMeta(url, docinfo=docinfo, dom=dom) # get access info

        return docinfo
575:
576:
577: def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None):
578: """gets the bibliographical information from the preseantion entry in texttools
579: """
580: dom=self.getPresentationInfoXML(url)
581: try:
582: docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
583: except:
584: pass
585: try:
586: docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
587: except:
588: pass
589: try:
590: docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
591: except:
592: pass
593: return docinfo
594:
595: def getDocinfoFromImagePath(self,path,docinfo=None,cut=0):
596: """path ist the path to the images it assumes that the index.meta file is one level higher."""
597: logger("documentViewer (getdocinfofromimagepath)", logging.INFO,"path: %s"%(path))
598: if docinfo is None:
599: docinfo = {}
600: path=path.replace("/mpiwg/online","")
601: docinfo['imagePath'] = path
602: docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo,cut=cut)
603:
604: pathorig=path
605: for x in range(cut):
606: path=getParentDir(path)
607: logging.error("PATH:"+path)
608: imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path
609: docinfo['imageURL'] = imageUrl
610:
611: #path ist the path to the images it assumes that the index.meta file is one level higher.
612: docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
613: docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
614: return docinfo
615:
616:
617: def getDocinfo(self, mode, url):
618: """returns docinfo depending on mode"""
619: logger("documentViewer (getdocinfo)", logging.INFO,"mode: %s, url: %s"%(mode,url))
620: # look for cached docinfo in session
621: if self.REQUEST.SESSION.has_key('docinfo'):
622: docinfo = self.REQUEST.SESSION['docinfo']
623: # check if its still current
624: if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url:
625: logger("documentViewer (getdocinfo)", logging.INFO,"docinfo in session: %s"%docinfo)
626: return docinfo
627: # new docinfo
628: docinfo = {'mode': mode, 'url': url}
629: if mode=="texttool": #index.meta with texttool information
630: docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo)
631: elif mode=="imagepath":
632: docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo)
633: elif mode=="filepath":
634: docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1)
635: else:
636: logger("documentViewer (getdocinfo)", logging.ERROR,"unknown mode!")
637: raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))
638:
639: logger("documentViewer (getdocinfo)", logging.INFO,"docinfo: %s"%docinfo)
640: self.REQUEST.SESSION['docinfo'] = docinfo
641: return docinfo
642:
643:
644: def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None, viewMode=None, tocMode=None):
645: """returns pageinfo with the given parameters"""
646: pageinfo = {}
647: current = getInt(current)
648: pageinfo['current'] = current
649: rows = int(rows or self.thumbrows)
650: pageinfo['rows'] = rows
651: cols = int(cols or self.thumbcols)
652: pageinfo['cols'] = cols
653: grpsize = cols * rows
654: pageinfo['groupsize'] = grpsize
655: start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1)))
656: # int(current / grpsize) * grpsize +1))
657: pageinfo['start'] = start
658: pageinfo['end'] = start + grpsize
659: if (docinfo is not None) and ('numPages' in docinfo):
660: np = int(docinfo['numPages'])
661: pageinfo['end'] = min(pageinfo['end'], np)
662: pageinfo['numgroups'] = int(np / grpsize)
663: if np % grpsize > 0:
664: pageinfo['numgroups'] += 1
665:
666:
667: pageinfo['viewMode'] = viewMode
668: pageinfo['tocMode'] = tocMode
669: pageinfo['query'] = self.REQUEST.get('query',' ')
670: pageinfo['queryType'] = self.REQUEST.get('queryType',' ')
671: pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext')
672: pageinfo['textPN'] = self.REQUEST.get('textPN','1')
673: pageinfo['highlightQuery'] = self.REQUEST.get('highlightQuery','')
674: pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30')
675: pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '10')
676: pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1')
677: toc = int (pageinfo['tocPN'])
678: pageinfo['textPages'] =int (toc)
679:
680: if 'tocSize_%s'%tocMode in docinfo:
681: tocSize = int(docinfo['tocSize_%s'%tocMode])
682: tocPageSize = int(pageinfo['tocPageSize'])
683: # cached toc
684:
685: if tocSize%tocPageSize>0:
686: tocPages=tocSize/tocPageSize+1
687: else:
688: tocPages=tocSize/tocPageSize
689: pageinfo['tocPN'] = min (tocPages,toc)
690:
691: pageinfo['searchPN'] =self.REQUEST.get('searchPN','1')
692: pageinfo['sn'] =self.REQUEST.get('sn','')
693:
694: return pageinfo
695:
    def getSearch(self, pn=1, pageinfo=None, docinfo=None, query=None, queryType=None):
        """get search list

        Runs the query on the fulltext server and rewrites the links in the
        result so they point back at this viewer.

        @param pn: page number (overridden from pageinfo below)
        @param pageinfo: pageinfo dict (query, queryType and paging state)
        @param docinfo: docinfo dict (textURLPath, url)
        @param query: unused -- overridden from pageinfo below
        @param queryType: unused -- overridden from pageinfo below
        """
        docpath = docinfo['textURLPath']
        url = docinfo['url']
        logging.debug("documentViewer (gettoc) docpath: %s"%(docpath))
        logging.debug("documentViewer (gettoc) url: %s"%(url))
        # all effective parameters come from pageinfo
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo['searchPN']
        sn = pageinfo['sn']
        highlightQuery = pageinfo['highlightQuery']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
        viewMode= pageinfo['viewMode']
        tocMode = pageinfo['tocMode']
        tocPN = pageinfo['tocPN']
        selfurl = self.absolute_url()
        # run the query on the fulltext server
        page=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s&viewMode=%s&highlightQuery=%s"%(docpath, 'text', queryType, query, pagesize, pn, sn, viewMode,highlightQuery) ,outputUnicode=False)
        # make document references point back at this viewer
        pagexml = page.replace('?document=%s'%str(docpath),'?url=%s'%url)
        pagedom = Parse(pagexml)
        # fulltext-like queries: rewrite links inside the queryResultPage div
        if (queryType=="fulltext")or(queryType=="xpath")or(queryType=="xquery")or(queryType=="fulltextMorphLemma"):
            pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            # rewrite page links to go through this viewer
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s'%(viewMode,queryType,query,pagesize,pn,tocMode,pn,tocPN))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                return serializeNode(pagenode)

        if (queryType=="fulltextMorph"):
            # morphological search: rewrite the whole queryResult div,
            # lemma links open in a popup window
            pagedivs = pagedom.xpath("//div[@class='queryResult']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        if href.startswith('page-fragment.xql'):
                            # rewrite page links to go through this viewer
                            selfurl = self.absolute_url()
                            pagexml=href.replace('mode=text','mode=texttool&viewMode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&tocMode=%s&searchPN=%s&tocPN=%s'%(viewMode,queryType,query,pagesize,pn,tocMode,pn,tocPN))
                            hrefNode.nodeValue = pagexml.replace('page-fragment.xql','%s'%selfurl)
                        if href.startswith('../lt/lemma.xql'):
                            # lemma links open the lemma template in a popup
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                pagedivs = pagedom.xpath("//div[@class='queryResultMorphExpansion']")
                return serializeNode(pagenode)

        if (queryType=="ftIndex")or(queryType=="ftIndexMorph"):
            # index queries: rewrite page, dictionary and lemma links
            pagedivs= pagedom.xpath("//div[@class='queryResultPage']")
            if len(pagedivs)>0:
                pagenode=pagedivs[0]
                links=pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        href = hrefNode.nodeValue
                        hrefNode.nodeValue=href.replace('mode=text','mode=texttool&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s'%(viewMode,tocMode,tocPN,pn))

                        if href.startswith('../lt/lex.xql'):
                            # dictionary links open the voc template in a popup
                            hrefNode.nodeValue = href.replace('../lt/lex.xql','%s/template/head_main_voc'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                        if href.startswith('../lt/lemma.xql'):
                            # lemma links open the lemma template in a popup
                            hrefNode.nodeValue = href.replace('../lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                            l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=400, scrollbars=1'); return false;")
                            l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
                return serializeNode(pagenode)
        return "no text here"
775:
776: def getNumPages(self,docinfo=None):
777: """get list of pages from fulltext and put in docinfo"""
778: xquery = '//pb'
779: text = self.template.fulltextclient.eval("/mpdl/interface/xquery.xql", "document=%s&xquery=%s"%(docinfo['textURLPath'],xquery))
780: # TODO: better processing of the page list. do we need the info somewhere else also?
781: docinfo['numPages'] = text.count("<pb ")
782: return docinfo
783:
784: def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None, highlightQuery=None,sn=None, viewMode=None, tocMode=None, tocPN=None):
785: """returns single page from fulltext"""
786: docpath = docinfo['textURLPath']
787: path = docinfo['textURLPath']
788: url = docinfo['url']
789: viewMode= pageinfo['viewMode']
790: tocMode = pageinfo['tocMode']
791: tocPN = pageinfo['tocPN']
792: selfurl = self.absolute_url()
793:
794: #pn = pageinfo['searchPN']
795:
796: if mode == "text_dict":
797: textmode = "textPollux"
798: else:
799: textmode = mode
800:
801: textParam = "document=%s&mode=%s&pn=%s"%(docpath,textmode,pn)
802: if highlightQuery is not None:
803: textParam +="&highlightQuery=%s&sn=%s"%(highlightQuery,sn)
804:
805: pagexml=self.template.fulltextclient.eval("/mpdl/interface/page-fragment.xql", textParam, outputUnicode=False)
806: pagedom = Parse(pagexml)
807: # plain text mode
808: if mode == "text":
809: # first div contains text
810: pagedivs = pagedom.xpath("/div")
811: #queryResultPage
812: if len(pagedivs) > 0:
813: pagenode = pagedivs[0]
814: links = pagenode.xpath("//a")
815: for l in links:
816: hrefNode = l.getAttributeNodeNS(None, u"href")
817:
818: if hrefNode:
819: href= hrefNode.nodeValue
820: if href.startswith('#note-'):
821: hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,viewMode,tocMode,tocPN,pn))
822:
823: return serializeNode(pagenode)
824: if mode == "xml":
825: # first div contains text
826: pagedivs = pagedom.xpath("/div")
827: if len(pagedivs) > 0:
828: pagenode = pagedivs[0]
829: return serializeNode(pagenode)
830: if mode == "pureXml":
831: # first div contains text
832: pagedivs = pagedom.xpath("/div")
833: if len(pagedivs) > 0:
834: pagenode = pagedivs[0]
835: return serializeNode(pagenode)
836: # text-with-links mode
837: if mode == "text_dict":
838: # first div contains text
839: pagedivs = pagedom.xpath("/div")
840: if len(pagedivs) > 0:
841: pagenode = pagedivs[0]
842: # check all a-tags
843: links = pagenode.xpath("//a")
844: for l in links:
845: hrefNode = l.getAttributeNodeNS(None, u"href")
846: if hrefNode:
847: # is link with href
848: href = hrefNode.nodeValue
849: if href.startswith('lt/lex.xql'):
850: # is pollux link
851: selfurl = self.absolute_url()
852: # change href
853: hrefNode.nodeValue = href.replace('lt/lex.xql','%s/template/head_main_voc'%selfurl)
854: # add target
855: l.setAttributeNS(None, 'target', '_blank')
856: l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;")
857: l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
858:
859: if href.startswith('lt/lemma.xql'):
860: selfurl = self.absolute_url()
861: hrefNode.nodeValue = href.replace('lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
862: l.setAttributeNS(None, 'target', '_blank')
863: l.setAttributeNS(None, 'onClick',"popupWin = window.open(this.href, 'contacts', 'location,width=500,height=600,top=180, left=700, scrollbars=1'); return false;")
864: l.setAttributeNS(None, 'onDblclick', 'popupWin.focus();')
865:
866: if href.startswith('#note-'):
867: hrefNode.nodeValue = href.replace('#note-',"?url=%s&viewMode=%s&tocMode=%s&tocPN=%s&pn=%s#note-"%(url,viewMode,tocMode,tocPN,pn))
868:
869:
870: return serializeNode(pagenode)
871:
872: return "no text here"
873:
874: def getTranslate(self, query=None, language=None):
875: """translate into another languages"""
876: pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","document=&language="+str(language)+"&query="+url_quote(str(query)))
877: return pagexml
878:
879: def getLemma(self, lemma=None, language=None):
880: """simular words lemma """
881: pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","document=&language="+str(language)+"&lemma="+url_quote(str(lemma)))
882: #pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","lemma=%s&language=%s"%(lemma,language),outputUnicode=False)
883: return pagexml
884:
885: def getQuery (self, docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
886: """number of"""
887: docpath = docinfo['textURLPath']
888: pagesize = pageinfo['queryPageSize']
889: pn = pageinfo['searchPN']
890: query =pageinfo['query']
891: queryType =pageinfo['queryType']
892:
893: tocSearch = 0
894: tocDiv = None
895: pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, query, pagesize, pn) ,outputUnicode=False)
896:
897: pagedom = Parse(pagexml)
898: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
899: tocSearch = int(getTextFromNode(numdivs[0]))
900: tc=int((tocSearch/10)+1)
901: logging.debug("documentViewer (gettoc) tc: %s"%(tc))
902: return tc
903:
904: def getToc(self, mode="text", docinfo=None):
905: """loads table of contents and stores in docinfo"""
906: logging.debug("documentViewer (gettoc) mode: %s"%(mode))
907: if mode == "none":
908: return docinfo
909:
910: if 'tocSize_%s'%mode in docinfo:
911: # cached toc
912: return docinfo
913:
914: docpath = docinfo['textURLPath']
915: # we need to set a result set size
916: pagesize = 1000
917: pn = 1
918: if mode == "text":
919: queryType = "toc"
920: else:
921: queryType = mode
922: # number of entries in toc
923: tocSize = 0
924: tocDiv = None
925: pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType,pagesize,pn), outputUnicode=False)
926: # post-processing downloaded xml
927: pagedom = Parse(pagexml)
928: # get number of entries
929: numdivs = pagedom.xpath("//div[@class='queryResultHits']")
930: if len(numdivs) > 0:
931: tocSize = int(getTextFromNode(numdivs[0]))
932: # div contains text
933: #pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
934: #if len(pagedivs) > 0:
935: # tocDiv = pagedivs[0]
936:
937: docinfo['tocSize_%s'%mode] = tocSize
938: #docinfo['tocDiv_%s'%mode] = tocDiv
939: return docinfo
940:
941: def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
942: """returns single page from the table of contents"""
943: # TODO: this should use the cached TOC
944: if mode == "text":
945: queryType = "toc"
946: else:
947: queryType = mode
948: docpath = docinfo['textURLPath']
949: path = docinfo['textURLPath']
950: #logging.debug("documentViewer (gettoc) pathNomer: %s"%(pathNomer))
951: pagesize = pageinfo['tocPageSize']
952: pn = pageinfo['tocPN']
953: url = docinfo['url']
954: selfurl = self.absolute_url()
955: viewMode= pageinfo['viewMode']
956: tocMode = pageinfo['tocMode']
957: tocPN = pageinfo['tocPN']
958:
959: pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn), outputUnicode=False)
960: page = pagexml.replace('page-fragment.xql?document=%s'%str(path),'%s?url=%s&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl,url, viewMode, tocMode, tocPN))
961: text = page.replace('mode=image','mode=texttool')
962: return text
963: # post-processing downloaded xml
964: #pagedom = Parse(text)
965: # div contains text
966: #pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
967: #if len(pagedivs) > 0:
968: # pagenode = pagedivs[0]
969: # return serializeNode(pagenode)
970: #else:
971: # return "No TOC!"
972:
973: def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None):
974: """init document viewer"""
975: self.title=title
976: self.digilibBaseUrl = digilibBaseUrl
977: self.thumbrows = thumbrows
978: self.thumbcols = thumbcols
979: self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
980: if RESPONSE is not None:
981: RESPONSE.redirect('manage_main')
982:
983:
984:
def manage_AddDocumentViewerForm(self):
    """Render the form for adding a documentViewer instance."""
    template = PageTemplateFile('zpt/addDocumentViewer', globals()).__of__(self)
    return template()
989:
def manage_AddDocumentViewer(self,id,imageScalerUrl="",textServerName="",title="",RESPONSE=None):
    """Create a documentViewer instance and register it under *id*."""
    viewer = documentViewer(id, imageScalerUrl=imageScalerUrl, title=title, textServerName=textServerName)
    self._setObject(id, viewer)
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')
997:
998:
999: ##
1000: ## DocumentViewerTemplate class
1001: ##
class DocumentViewerTemplate(ZopePageTemplate):
    """Template for document viewer"""
    # Zope management-interface type label for this template class
    meta_type="DocumentViewer Template"
1005:
1006:
def manage_addDocumentViewerTemplateForm(self):
    """Render the form for adding a DocumentViewerTemplate."""
    template = PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
    return template()
1011:
def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
                                     REQUEST=None, submit=None):
    "Add a Page Template with optional file content."
    # NOTE(review): the *text* parameter is accepted for interface
    # compatibility but ignored; the template body always comes from
    # zpt/viewer_main.zpt (original behaviour, kept as-is)
    self._setObject(id, DocumentViewerTemplate(id))
    ob = getattr(self, id)
    # read the default template text; close the handle explicitly instead
    # of leaking it (the original used an unclosed file(...).read())
    f = open(os.path.join(package_home(globals()), 'zpt/viewer_main.zpt'), 'r')
    try:
        txt = f.read()
    finally:
        f.close()
    logging.info("txt %s:"%txt)
    ob.pt_edit(txt, "text/html")
    if title:
        ob.pt_setTitle(title)
    try:
        u = self.DestinationURL()
    except AttributeError:
        u = REQUEST['URL1']

    u = "%s/%s" % (u, urllib.quote(id))
    REQUEST.RESPONSE.redirect(u+'/manage_main')
    return ''
1031:
1032:
1033:
FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>