File:  [Repository] / documentViewer / documentViewer.py
Revision 1.47: download - view: text, annotated - select for diffs - revision graph
Tue Apr 27 12:58:31 2010 UTC (14 years, 2 months ago) by abukhman
Branches: MAIN
CVS tags: HEAD
Last update

    1: 
    2: from OFS.Folder import Folder
    3: from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
    4: from Products.PageTemplates.PageTemplateFile import PageTemplateFile 
    5: from AccessControl import ClassSecurityInfo
    6: from AccessControl import getSecurityManager
    7: from Globals import package_home
    8: 
    9: from Ft.Xml.Domlette import NonvalidatingReader
   10: from Ft.Xml.Domlette import PrettyPrint, Print
   11: from Ft.Xml import EMPTY_NAMESPACE, Parse
   12: 
   13: from xml.dom.minidom import parse, parseString
   14: 
   15: 
   16: 
   17: import Ft.Xml.XPath
   18: import cStringIO
   19: import xmlrpclib
   20: import os.path
   21: import sys
   22: import cgi
   23: import urllib
   24: import logging
   25: import math
   26: 
   27: import urlparse 
   28: from types import *
   29: 
   30: def logger(txt,method,txt2):
   31:     """logging"""
   32:     logging.info(txt+ txt2)
   33:     
   34:     
   35: def getInt(number, default=0):
   36:     """returns always an int (0 in case of problems)"""
   37:     try:
   38:         return int(number)
   39:     except:
   40:         return int(default)
   41: 
   42: def getTextFromNode(nodename):
   43:     """get the cdata content of a node"""
   44:     if nodename is None:
   45:         return ""
   46:     nodelist=nodename.childNodes
   47:     rc = ""
   48:     for node in nodelist:
   49:         if node.nodeType == node.TEXT_NODE:
   50:            rc = rc + node.data
   51:     return rc
   52: 
   53: def serializeNode(node, encoding='utf-8'):
   54:     """returns a string containing node as XML"""
   55:     buf = cStringIO.StringIO()
   56:     Print(node, stream=buf, encoding=encoding)
   57:     s = buf.getvalue()
   58:     buf.close()
   59:     return s
   60: 
   61:         
   62: def getParentDir(path):
   63:     """returns pathname shortened by one"""
   64:     return '/'.join(path.split('/')[0:-1])
   65:         
   66: 
   67: import socket
   68: 
   69: def urlopen(url,timeout=2):
   70:         """urlopen mit timeout"""
   71:         socket.setdefaulttimeout(timeout)
   72:         ret=urllib.urlopen(url)
   73:         socket.setdefaulttimeout(5)
   74:         return ret
   75: 
   76: 
   77: ##
   78: ## documentViewer class
   79: ##
##
## documentViewer class
##
class documentViewer(Folder):
    """Zope Folder that renders image/fulltext documents via page templates."""
    #textViewerUrl="http://127.0.0.1:8080/HFQP/testXSLT/getPage?"
    
    # Zope management-screen type name
    meta_type="Document viewer"
    
    security=ClassSecurityInfo()
    # add a "main config" tab to the standard Folder management screens
    manage_options=Folder.manage_options+(
        {'label':'main config','action':'changeDocumentViewerForm'},
        )

    # templates and forms (looked up as attributes by the view methods below)
    viewer_main = PageTemplateFile('zpt/viewer_main', globals())
    toc_thumbs = PageTemplateFile('zpt/toc_thumbs', globals())
    toc_text = PageTemplateFile('zpt/toc_text', globals())
    toc_figures = PageTemplateFile('zpt/toc_figures', globals())
    page_main_images = PageTemplateFile('zpt/page_main_images', globals())
    page_main_text = PageTemplateFile('zpt/page_main_text', globals())
    page_main_text_dict = PageTemplateFile('zpt/page_main_text_dict', globals())
    head_main = PageTemplateFile('zpt/head_main', globals())
    docuviewer_css = PageTemplateFile('css/docuviewer.css', globals())
    info_xml = PageTemplateFile('zpt/info_xml', globals())

    thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())
    # the config form is only reachable for managers
    security.declareProtected('View management screens','changeDocumentViewerForm')    
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())
  106: 
  107:     
  108:     def __init__(self,id,imageScalerUrl=None,textServerName=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=5,authgroups="mpiwg"):
  109:         """init document viewer"""
  110:         self.id=id
  111:         self.title=title
  112:         self.thumbcols = thumbcols
  113:         self.thumbrows = thumbrows
  114:         # authgroups is list of authorized groups (delimited by ,)
  115:         self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
  116:         # create template folder so we can always use template.something
  117:         
  118:         templateFolder = Folder('template')
  119:         #self['template'] = templateFolder # Zope-2.12 style
  120:         self._setObject('template',templateFolder) # old style
  121:         try:
  122:             from Products.XMLRpcTools.XMLRpcTools import XMLRpcServerProxy
  123:             xmlRpcClient = XMLRpcServerProxy(id='fulltextclient', serverUrl=textServerName, use_xmlrpc=False)
  124:             #templateFolder['fulltextclient'] = xmlRpcClient
  125:             templateFolder._setObject('fulltextclient',xmlRpcClient)
  126:         except Exception, e:
  127:             logging.error("Unable to create XMLRpcTools for fulltextclient: "+str(e))
  128:         try:
  129:             from Products.zogiLib.zogiLib import zogiLib
  130:             zogilib = zogiLib(id="zogilib", title="zogilib for docuviewer", dlServerURL=imageScalerUrl, layout="book")
  131:             #templateFolder['zogilib'] = zogilib
  132:             templateFolder._setObject('zogilib',zogilib)
  133:         except Exception, e:
  134:             logging.error("Unable to create zogiLib for zogilib: "+str(e))
  135:         
  136: 
  137:     security.declareProtected('View','thumbs_rss')
  138:     def thumbs_rss(self,mode,url,viewMode="auto",start=None,pn=1):
  139:         '''
  140:         view it
  141:         @param mode: defines how to access the document behind url 
  142:         @param url: url which contains display information
  143:         @param viewMode: if images display images, if text display text, default is images (text,images or auto)
  144:         
  145:         '''
  146:         logging.debug("HHHHHHHHHHHHHH:load the rss")
  147:         logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
  148:         
  149:         if not hasattr(self, 'template'):
  150:             # create template folder if it doesn't exist
  151:             self.manage_addFolder('template')
  152:             
  153:         if not self.digilibBaseUrl:
  154:             self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
  155:             
  156:         docinfo = self.getDocinfo(mode=mode,url=url)
  157:         pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
  158:         pt = getattr(self.template, 'thumbs_main_rss')
  159:         
  160:         if viewMode=="auto": # automodus gewaehlt
  161:             if docinfo.get("textURL",'') and self.textViewerUrl: #texturl gesetzt und textViewer konfiguriert
  162:                 viewMode="text"
  163:             else:
  164:                 viewMode="images"
  165:                
  166:         return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
  167:   
    security.declareProtected('View','index_html')
    def index_html(self,url,mode="texttool",viewMode="auto",tocMode="thumbs",start=None,pn=1,mk=None, query=None, querySearch=None):
        '''
        Main view: render the document viewer page.
        @param mode: defines how to access the document behind url 
        @param url: url which contains display information
        @param viewMode: if images display images, if text display text, default is auto (text,images or auto)
        @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, search)
        @param mk: mark parameter(s); turned into a query fragment by generateMarks
        @param query: search query; note it is re-read from REQUEST in getPageinfo
        @param querySearch: type of different search modes (fulltext, fulltextMorph, xpath, xquery, ftIndex, ftIndexMorph)
        '''
        
        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
        
        if not hasattr(self, 'template'):
            # this won't work -- the template folder is created in __init__
            logging.error("template folder missing!")
            return "ERROR: template folder missing!"
            
        if not getattr(self, 'digilibBaseUrl', None):
            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
            
        docinfo = self.getDocinfo(mode=mode,url=url)
        
        
        if tocMode != "thumbs":
            # get table of contents
            # NOTE(review): getToc is not defined in this part of the file -- presumably below
            docinfo = self.getToc(mode=tocMode, docinfo=docinfo)
            
        pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo,viewMode=viewMode,tocMode=tocMode)
        
        if viewMode=="auto": # auto mode selected
            if docinfo.get("textURL",''): # textURL set and textViewer configured
                viewMode="text"
            else:
                viewMode="images"
                
        pt = getattr(self.template, 'viewer_main')               
        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,mk=self.generateMarks(mk))
  206:   
  207:     def generateMarks(self,mk):
  208:         ret=""
  209:         if mk is None:
  210:             return ""
  211:     	if type(mk) is not ListType:
  212:     		mk=[mk]
  213:         for m in mk:
  214:             ret+="mk=%s"%m
  215:         return ret
  216: 
  217: 
  218:     def findDigilibUrl(self):
  219:         """try to get the digilib URL from zogilib"""
  220:         url = self.template.zogilib.getDLBaseUrl()
  221:         return url
  222:     
  223:     def getStyle(self, idx, selected, style=""):
  224:         """returns a string with the given style and append 'sel' if path == selected."""
  225:         #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
  226:         if idx == selected:
  227:             return style + 'sel'
  228:         else:
  229:             return style
  230:     
  231:     def getLink(self,param=None,val=None):
  232:         """link to documentviewer with parameter param set to val"""
  233:         params=self.REQUEST.form.copy()
  234:         if param is not None:
  235:             if val is None:
  236:                 if params.has_key(param):
  237:                     del params[param]
  238:             else:
  239:                 params[param] = str(val)
  240:                 
  241:         if params.get("mode", None) == "filepath": #wenn beim erst Aufruf filepath gesetzt wurde aendere das nun zu imagepath
  242:                 params["mode"] = "imagepath"
  243:                 params["url"] = getParentDir(params["url"])
  244:                 
  245:         # quote values and assemble into query string
  246:         ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
  247:         url=self.REQUEST['URL1']+"?"+ps
  248:         return url
  249: 
  250:     def getLinkAmp(self,param=None,val=None):
  251:         """link to documentviewer with parameter param set to val"""
  252:         params=self.REQUEST.form.copy()
  253:         if param is not None:
  254:             if val is None:
  255:                 if params.has_key(param):
  256:                     del params[param]
  257:             else:
  258:                 params[param] = str(val)
  259:                 
  260:         # quote values and assemble into query string
  261:         logging.info("XYXXXXX: %s"%repr(params.items()))
  262:         ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
  263:         url=self.REQUEST['URL1']+"?"+ps
  264:         return url
  265:     
  266:     def getInfo_xml(self,url,mode):
  267:         """returns info about the document as XML"""
  268: 
  269:         if not self.digilibBaseUrl:
  270:             self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
  271:         
  272:         docinfo = self.getDocinfo(mode=mode,url=url)
  273:         pt = getattr(self.template, 'info_xml')
  274:         return pt(docinfo=docinfo)
  275: 
  276:     
  277:     def isAccessible(self, docinfo):
  278:         """returns if access to the resource is granted"""
  279:         access = docinfo.get('accessType', None)
  280:         logger("documentViewer (accessOK)", logging.INFO, "access type %s"%access)
  281:         if access is not None and access == 'free':
  282:             logger("documentViewer (accessOK)", logging.INFO, "access is free")
  283:             return True
  284:         elif access is None or access in self.authgroups:
  285:             # only local access -- only logged in users
  286:             user = getSecurityManager().getUser()
  287:             if user is not None:
  288:                 #print "user: ", user
  289:                 return (user.getUserName() != "Anonymous User")
  290:             else:
  291:                 return False
  292:         
  293:         logger("documentViewer (accessOK)", logging.INFO, "unknown access type %s"%access)
  294:         return False
  295:     
  296:                 
  297:     def getDirinfoFromDigilib(self,path,docinfo=None,cut=0):
  298:         """gibt param von dlInfo aus"""
  299:         num_retries = 3
  300:         if docinfo is None:
  301:             docinfo = {}
  302:         
  303:         for x in range(cut):
  304:                
  305:                 path=getParentDir(path)
  306:        
  307:         infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
  308:     
  309:         logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo from %s"%(infoUrl))
  310:         
  311:         for cnt in range(num_retries):
  312:             try:
  313:                 # dom = NonvalidatingReader.parseUri(imageUrl)
  314:                 txt=urllib.urlopen(infoUrl).read()
  315:                 dom = Parse(txt)
  316:                 break
  317:             except:
  318:                 logger("documentViewer (getdirinfofromdigilib)", logging.ERROR, "error reading %s (try %d)"%(infoUrl,cnt))
  319:         else:
  320:             raise IOError("Unable to get dir-info from %s"%(infoUrl))
  321:         
  322:         sizes=dom.xpath("//dir/size")
  323:         logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo:size"%sizes)
  324:         
  325:         if sizes:
  326:             docinfo['numPages'] = int(getTextFromNode(sizes[0]))
  327:         else:
  328:             docinfo['numPages'] = 0
  329:             
  330:         # TODO: produce and keep list of image names and numbers
  331:                         
  332:         return docinfo
  333:     
  334:             
  335:     def getIndexMeta(self, url):
  336:         """returns dom of index.meta document at url"""
  337:         num_retries = 3
  338:         dom = None
  339:         metaUrl = None
  340:         if url.startswith("http://"):
  341:             # real URL
  342:             metaUrl = url
  343:         else:
  344:             # online path
  345:             server=self.digilibBaseUrl+"/servlet/Texter?fn="
  346:             metaUrl=server+url.replace("/mpiwg/online","")
  347:             if not metaUrl.endswith("index.meta"):
  348:                 metaUrl += "/index.meta"
  349:         logging.debug("METAURL: %s"%metaUrl)
  350:         for cnt in range(num_retries):
  351:             try:
  352:                 # patch dirk encoding fehler treten dann nicht mehr auf
  353:                 # dom = NonvalidatingReader.parseUri(metaUrl)
  354:                 txt=urllib.urlopen(metaUrl).read()
  355:                 dom = Parse(txt)
  356:                 break
  357:             except:
  358:                 logger("ERROR documentViewer (getIndexMeta)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
  359:                 
  360:         if dom is None:
  361:             raise IOError("Unable to read index meta from %s"%(url))
  362:                  
  363:         return dom
  364:     
  365:     def getPresentationInfoXML(self, url):
  366:         """returns dom of info.xml document at url"""
  367:         num_retries = 3
  368:         dom = None
  369:         metaUrl = None
  370:         if url.startswith("http://"):
  371:             # real URL
  372:             metaUrl = url
  373:         else:
  374:             # online path
  375:             server=self.digilibBaseUrl+"/servlet/Texter?fn="
  376:             metaUrl=server+url.replace("/mpiwg/online","")
  377:         
  378:         for cnt in range(num_retries):
  379:             try:
  380:                 # patch dirk encoding fehler treten dann nicht mehr auf
  381:                 # dom = NonvalidatingReader.parseUri(metaUrl)
  382:                 txt=urllib.urlopen(metaUrl).read()
  383:                 dom = Parse(txt)
  384:                 break
  385:             except:
  386:                 logger("ERROR documentViewer (getPresentationInfoXML)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
  387:                 
  388:         if dom is None:
  389:             raise IOError("Unable to read infoXMLfrom %s"%(url))
  390:                  
  391:         return dom
  392:                         
  393:         
  394:     def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
  395:         """gets authorization info from the index.meta file at path or given by dom"""
  396:         logger("documentViewer (getauthinfofromindexmeta)", logging.INFO,"path: %s"%(path))
  397:         
  398:         access = None
  399:         
  400:         if docinfo is None:
  401:             docinfo = {}
  402:             
  403:         if dom is None:
  404:             for x in range(cut):
  405:                 path=getParentDir(path)
  406:             dom = self.getIndexMeta(path)
  407:        
  408:         acctype = dom.xpath("//access-conditions/access/@type")
  409:         if acctype and (len(acctype)>0):
  410:             access=acctype[0].value
  411:             if access in ['group', 'institution']:
  412:                 access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
  413:             
  414:         docinfo['accessType'] = access
  415:         return docinfo
  416:     
  417:         
    def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
        """Read bibliographical info from the index.meta file at path (or given dom).

        Fills docinfo['bib'] with all raw bib fields and extracts author,
        title, year and lang according to the bib-type mapping.
        @param cut: number of trailing path components to strip before fetching
        """
        logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))
        
        if docinfo is None:
            docinfo = {}
        
        if dom is None:
            # strip `cut` trailing components, then fetch the index.meta DOM
            for x in range(cut):
                path=getParentDir(path)
            dom = self.getIndexMeta(path)
        
        logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
        # put in all raw bib fields as dict "bib"
        bib = dom.xpath("//bib/*")
        if bib and len(bib)>0:
            bibinfo = {}
            for e in bib:
                bibinfo[e.localName] = getTextFromNode(e)
            docinfo['bib'] = bibinfo
        
        # extract some fields (author, title, year) according to their mapping
        # NOTE(review): self.metadata is acquired from the Zope environment,
        # not defined in this file -- confirm against the deployment
        metaData=self.metadata.main.meta.bib
        bibtype=dom.xpath("//bib/@type")
        if bibtype and (len(bibtype)>0):
            bibtype=bibtype[0].value
        else:
            bibtype="generic"
            
        bibtype=bibtype.replace("-"," ") # index.meta uses "-" where the mapping uses " " (intentional, per ROC)
        docinfo['bib_type'] = bibtype
        bibmap=metaData.generateMappingForType(bibtype)
        # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
            try:
                docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
            except: pass
            try:
                docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
            except: pass
            try:
                docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
            except: pass
            logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
            try:
                docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
            except:
                docinfo['lang']=''

        return docinfo
  468:     
  469:     
    def getDocinfoFromTextTool(self, url, dom=None, docinfo=None):
        """Parse the texttool tag in index.meta and fill docinfo.

        Extracts the archive path, image directory, viewer and text URLs, and
        merges in bibliographical, presentation and access info.
        @param url: URL or online path of the index.meta file
        @param dom: optional pre-parsed index.meta DOM
        @param docinfo: dict to update (created if None)
        @raise IOError: when no archive-path can be determined
        """
        logger("documentViewer (getdocinfofromtexttool)", logging.INFO, "url: %s" % (url))
        if docinfo is None:
           docinfo = {}
        if docinfo.get('lang', None) is None:
            docinfo['lang'] = '' # default: no language set
        if dom is None:
            dom = self.getIndexMeta(url)
        
        archivePath = None
        archiveName = None
    
        archiveNames = dom.xpath("//resource/name")
        if archiveNames and (len(archiveNames) > 0):
            archiveName = getTextFromNode(archiveNames[0])
        else:
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/name missing in: %s" % (url))
        
        archivePaths = dom.xpath("//resource/archive-path")
        if archivePaths and (len(archivePaths) > 0):
            archivePath = getTextFromNode(archivePaths[0])
            # clean up archive path: ensure a leading slash and a trailing archive name
            if archivePath[0] != '/':
                archivePath = '/' + archivePath
            if archiveName and (not archivePath.endswith(archiveName)):
                archivePath += "/" + archiveName
        else:
            # try to get archive-path from url
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/archive-path missing in: %s" % (url))
            if (not url.startswith('http')):
                archivePath = url.replace('index.meta', '')
                
        if archivePath is None:
            # we balk without archive-path
            raise IOError("Missing archive-path (for text-tool) in %s" % (url))
        
        imageDirs = dom.xpath("//texttool/image")
        if imageDirs and (len(imageDirs) > 0):
            imageDir = getTextFromNode(imageDirs[0])
            
        else:
            # no image tag: text-only document (not an error -- textmode is standard now)
            #raise IOError("No text-tool info in %s"%(url))
            imageDir = ""
            #xquery="//pb"  
            docinfo['imagePath'] = "" # no images
            docinfo['imageURL'] = ""
            
        if imageDir and archivePath:
            # build the digilib path and Scaler URL for the image directory
            imageDir = os.path.join(archivePath, imageDir)
            imageDir = imageDir.replace("/mpiwg/online", '')
            docinfo = self.getDirinfoFromDigilib(imageDir, docinfo=docinfo)
            docinfo['imagePath'] = imageDir
            
            docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir
            
        viewerUrls = dom.xpath("//texttool/digiliburlprefix")
        if viewerUrls and (len(viewerUrls) > 0):
            viewerUrl = getTextFromNode(viewerUrls[0])
            docinfo['viewerURL'] = viewerUrl
                   
        textUrls = dom.xpath("//texttool/text")
        if textUrls and (len(textUrls) > 0):
            textUrl = getTextFromNode(textUrls[0])
            if urlparse.urlparse(textUrl)[0] == "": # no scheme: treat as path relative to the archive
                textUrl = os.path.join(archivePath, textUrl) 
            # fix URLs starting with /mpiwg/online
            if textUrl.startswith("/mpiwg/online"):
                textUrl = textUrl.replace("/mpiwg/online", '', 1)
            
            docinfo['textURL'] = textUrl
    
        textUrls = dom.xpath("//texttool/text-url-path")
        if textUrls and (len(textUrls) > 0):
            textUrl = getTextFromNode(textUrls[0])
            docinfo['textURLPath'] = textUrl   
         
        presentationUrls = dom.xpath("//texttool/presentation")
        docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom)   # get info from the bib tag
        
        if presentationUrls and (len(presentationUrls) > 0): # override bib info with presentation info
             # the presentation URL is the metadata URL with index.meta replaced
             # by the relative path to the presentation info
            presentationPath = getTextFromNode(presentationUrls[0])
            if url.endswith("index.meta"): 
                presentationUrl = url.replace('index.meta', presentationPath)
            else:
                presentationUrl = url + "/" + presentationPath
            # for now the page count is simply set; thumbnail navigation will not work
            # NOTE(review): getNumPages is not defined in this part of the file
            docinfo = self.getNumPages(docinfo)
            docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl, docinfo=docinfo, dom=dom)
    
        docinfo = self.getAuthinfoFromIndexMeta(url, docinfo=docinfo, dom=dom)   # get access info
        
        return docinfo
  566:    
  567:    
  568:     def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None):
  569:         """gets the bibliographical information from the preseantion entry in texttools
  570:         """
  571:         dom=self.getPresentationInfoXML(url)
  572:         try:
  573:             docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
  574:         except:
  575:             pass
  576:         try:
  577:             docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
  578:         except:
  579:             pass
  580:         try:
  581:             docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
  582:         except:
  583:             pass
  584:         return docinfo
  585:     
  586:     def getDocinfoFromImagePath(self,path,docinfo=None,cut=0):
  587:         """path ist the path to the images it assumes that the index.meta file is one level higher."""
  588:         logger("documentViewer (getdocinfofromimagepath)", logging.INFO,"path: %s"%(path))
  589:         if docinfo is None:
  590:             docinfo = {}
  591:         path=path.replace("/mpiwg/online","")
  592:         docinfo['imagePath'] = path
  593:         docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo,cut=cut)
  594:         
  595:         pathorig=path
  596:         for x in range(cut):       
  597:                 path=getParentDir(path)
  598:         logging.error("PATH:"+path)
  599:         imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path
  600:         docinfo['imageURL'] = imageUrl
  601:         
  602:         #path ist the path to the images it assumes that the index.meta file is one level higher.
  603:         docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
  604:         docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
  605:         return docinfo
  606:     
  607:     
  608:     def getDocinfo(self, mode, url):
  609:         """returns docinfo depending on mode"""
  610:         logger("documentViewer (getdocinfo)", logging.INFO,"mode: %s, url: %s"%(mode,url))
  611:         # look for cached docinfo in session
  612:         if self.REQUEST.SESSION.has_key('docinfo'):
  613:             docinfo = self.REQUEST.SESSION['docinfo']
  614:             # check if its still current
  615:             if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url:
  616:                 logger("documentViewer (getdocinfo)", logging.INFO,"docinfo in session: %s"%docinfo)
  617:                 return docinfo
  618:         # new docinfo
  619:         docinfo = {'mode': mode, 'url': url}
  620:         if mode=="texttool": #index.meta with texttool information
  621:             docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo)
  622:         elif mode=="imagepath":
  623:             docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo)
  624:         elif mode=="filepath":
  625:             docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1)
  626:         else:
  627:             logger("documentViewer (getdocinfo)", logging.ERROR,"unknown mode!")
  628:             raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))
  629:                         
  630:         logger("documentViewer (getdocinfo)", logging.INFO,"docinfo: %s"%docinfo)
  631:         self.REQUEST.SESSION['docinfo'] = docinfo
  632:         return docinfo
  633:         
  634:         
  635:     def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None, viewMode=None, tocMode=None):
  636:         """returns pageinfo with the given parameters"""
  637:         pageinfo = {}
  638:         current = getInt(current)
  639:         pageinfo['current'] = current
  640:         rows = int(rows or self.thumbrows)
  641:         pageinfo['rows'] = rows
  642:         cols = int(cols or self.thumbcols)
  643:         pageinfo['cols'] = cols
  644:         grpsize = cols * rows
  645:         pageinfo['groupsize'] = grpsize
  646:         start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1)))
  647:         # int(current / grpsize) * grpsize +1))
  648:         pageinfo['start'] = start
  649:         pageinfo['end'] = start + grpsize
  650:         if (docinfo is not None) and ('numPages' in docinfo):
  651:             np = int(docinfo['numPages'])
  652:             pageinfo['end'] = min(pageinfo['end'], np)
  653:             pageinfo['numgroups'] = int(np / grpsize)
  654:             if np % grpsize > 0:
  655:                 pageinfo['numgroups'] += 1
  656:                 
  657:         pageinfo['viewMode'] = viewMode
  658:         pageinfo['tocMode'] = tocMode
  659:         pageinfo['query'] = self.REQUEST.get('query',' ')
  660:         pageinfo['queryType'] = self.REQUEST.get('queryType',' ')
  661:         pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext')
  662:         pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30')
  663:         pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '20')
  664:         pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1')
  665:         #if 'tocSize_%s'%tocMode in docinfo:
  666:             # cached toc
  667:           # pageinfo['tocPN'] = min (int (docinfo['tocSize_%s'%tocMode])/int(pageinfo['tocPageSize']),int(pageinfo['tocPN']))
  668:                
  669:         pageinfo['searchPN'] =self.REQUEST.get('searchPN','1')
  670:         pageinfo['sn'] =self.REQUEST.get('sn','1')
  671: 
  672:         return pageinfo
  673:                 
  674:     def getSearch(self, pn=1, pageinfo=None,  docinfo=None, query=None, queryType=None):
  675:         """get search list"""
  676:         docpath = docinfo['textURLPath'] 
  677:         pagesize = pageinfo['queryPageSize']
  678:         pn = pageinfo['searchPN']
  679:         sn = pageinfo['sn']
  680:         query =pageinfo['query']
  681:         queryType =pageinfo['queryType']
  682:         viewMode=  pageinfo['viewMode']
  683:         tocMode = pageinfo['tocMode']
  684:         tocPN = pageinfo['tocPN']
  685:         pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s"%(docpath, 'text', queryType, query, pagesize, pn, sn) ,outputUnicode=False)           
  686:         pagedom = Parse(pagexml)
  687:         pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
  688:         
  689:         selfurl = self.absolute_url()
  690:         
  691:         page = pagexml.replace('page-fragment.xql?document=/echo/la/Benedetti_1585.xml','%s?url=/mpiwg/online/permanent/library/163127KK&viewMode=%s&tocMode=%s&tocPN=%s&query=%s&queryType=%s'%(selfurl, viewMode, tocMode, tocPN, query, queryType))
  692:         text =page.replace('mode=text','mode=texttool')
  693:         href = text.replace('lt/lex.xql','%s/template/head_main_voc'%selfurl)
  694:         lemma= href.replace('lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
  695:         #logging.debug("documentViewer (gettoc) lemma: %s"%(lemma))
  696:         
  697:         return lemma
  698:                        
  699:     
  700:         #if len(pagedivs) > 0:
  701:         #    pagenode = pagedom[0]
  702:         #    return serializeNode(pagenode)
  703:         #else:
  704:         #    return "xaxa"
  705: 
  706:     def getNumPages(self,docinfo=None):
  707:         """get list of pages from fulltext and put in docinfo"""
  708:         xquery = '//pb'
  709:         text = self.template.fulltextclient.eval("/mpdl/interface/xquery.xql", "document=%s&xquery=%s"%(docinfo['textURLPath'],xquery))
  710:         # TODO: better processing of the page list. do we need the info somewhere else also?
  711:         docinfo['numPages'] = text.count("<pb ")
  712:         return docinfo
  713:        
    def getTextPage(self, mode="text", pn=1, docinfo=None, pageinfo=None,):
        """Return a single fulltext page as a serialized XML string.

        mode: "text" for plain text, "text_dict" for text with dictionary
        links (requested from the backend as "textPollux").
        pn: page number. docinfo must contain 'textURLPath'.
        Returns the serialized first div of the fetched fragment, or the
        string "no text here" when no matching div is found.
        """
        docpath = docinfo['textURLPath']
        # map the viewer mode onto the backend's mode name
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode
            
        # fetch the page fragment from the fulltext service
        pagexml=self.template.fulltextclient.eval("/mpdl/interface/page-fragment.xql", "document=%s&mode=%s&pn=%s"%(docpath,textmode,pn), outputUnicode=False)
        # post-processing of the downloaded xml
        pagedom = Parse(pagexml)
        # plain text mode
        if mode == "text":
            # first div contains the text
            pagedivs = pagedom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)

        # text-with-links mode
        if mode == "text_dict":
            # first div contains the text
            pagedivs = pagedom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags and rewrite dictionary links
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        # is a link with href
                        href = hrefNode.nodeValue
                        if href.startswith('lt/lex.xql'):
                            # pollux dictionary link: route it through our vocabulary template
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('lt/lex.xql','%s/template/head_main_voc'%selfurl)
                            # open in a new window
                            l.setAttributeNS(None, 'target', '_blank')
                            
                        if href.startswith('lt/lemma.xql'):    
                            # lemma link: route it through our lemma template
                            selfurl = self.absolute_url()
                            hrefNode.nodeValue = href.replace('lt/lemma.xql','%s/template/head_main_lemma'%selfurl)
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)
        
        # no matching mode or no div found in the fragment
        return "no text here"
  771: 
  772:     def getTranslate(self, query=None, language=None):
  773:         """translate into another languages"""
  774:         pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lex.xql","query=%s&language=%s"%(query,language),outputUnicode=False)
  775:         return pagexml
  776:     
  777:     def getLemma(self, lemma=None, language=None):
  778:         """lemma"""
  779:         pagexml=self.template.fulltextclient.eval("/mpdl/interface/lt/lemma.xql","lemma=%s&language=%s"%(lemma,language),outputUnicode=False)
  780:         return pagexml
  781: 
  782:     def getQuery (self,  docinfo=None, pageinfo=None, query=None, queryType=None, pn=1):
  783:          """number of"""
  784:          docpath = docinfo['textURLPath'] 
  785:          pagesize = pageinfo['queryPageSize']
  786:          pn = pageinfo['searchPN']
  787:          query =pageinfo['query']
  788:          queryType =pageinfo['queryType']
  789: 
  790:          tocSearch = 0
  791:          tocDiv = None
  792:          pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath, 'text', queryType, query, pagesize, pn) ,outputUnicode=False)
  793:          
  794:          pagedom = Parse(pagexml)
  795:          numdivs = pagedom.xpath("//div[@class='queryResultHits']")
  796:          tocSearch = int(getTextFromNode(numdivs[0]))
  797:          tc=int((tocSearch/20)+1)
  798:          logging.debug("documentViewer (gettoc) tc: %s"%(tc))
  799:          return tc
  800: 
  801:     def getToc(self, mode="text", docinfo=None):
  802:         """loads table of contents and stores in docinfo"""
  803:         logging.debug("documentViewer (gettoc) mode: %s"%(mode))
  804:         if 'tocSize_%s'%mode in docinfo:
  805:             # cached toc
  806:             return docinfo 
  807:         docpath = docinfo['textURLPath']
  808:         # we need to set a result set size
  809:         pagesize = 1000
  810:         pn = 1
  811:         if mode == "text":
  812:             queryType = "toc"
  813:         else:
  814:             queryType = mode
  815:         # number of entries in toc
  816:         tocSize = 0
  817:         tocDiv = None
  818:         pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType,pagesize,pn), outputUnicode=False)
  819:         # post-processing downloaded xml
  820:         pagedom = Parse(pagexml)
  821:         # get number of entries
  822:         numdivs = pagedom.xpath("//div[@class='queryResultHits']")
  823:         if len(numdivs) > 0:
  824:             tocSize = int(getTextFromNode(numdivs[0]))
  825:             # div contains text
  826:             #pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
  827:             #if len(pagedivs) > 0:
  828:             #    tocDiv = pagedivs[0]
  829: 
  830:         docinfo['tocSize_%s'%mode] = tocSize
  831:         #docinfo['tocDiv_%s'%mode] = tocDiv
  832:         return docinfo
  833:     
  834:     def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
  835:         """returns single page from the table of contents"""
  836:         # TODO: this should use the cached TOC
  837:         if mode == "text":
  838:             queryType = "toc"
  839:         else:
  840:             queryType = mode
  841:         docpath = docinfo['textURLPath']
  842:         pagesize = pageinfo['tocPageSize']
  843:         pn = pageinfo['tocPN']
  844:         
  845:         selfurl = self.absolute_url()  
  846:         viewMode=  pageinfo['viewMode']
  847:         tocMode = pageinfo['tocMode']
  848:         tocPN = pageinfo['tocPN']
  849:     
  850:         pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn), outputUnicode=False)
  851:         page = pagexml.replace('page-fragment.xql?document=/echo/la/Benedetti_1585.xml','%s?url=/mpiwg/online/permanent/library/163127KK&viewMode=%s&tocMode=%s&tocPN=%s'%(selfurl, viewMode, tocMode, tocPN))
  852:         text = page.replace('mode=image','mode=texttool')
  853:         return text
  854:         # post-processing downloaded xml
  855:         #pagedom = Parse(text)
  856:         # div contains text
  857:         #pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
  858:         #if len(pagedivs) > 0:
  859:         #    pagenode = pagedivs[0]
  860:         #    return serializeNode(pagenode)
  861:         #else:
  862:         #    return "No TOC!"
  863: 
  864:     
  865:     def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None):
  866:         """init document viewer"""
  867:         self.title=title
  868:         self.digilibBaseUrl = digilibBaseUrl
  869:         self.thumbrows = thumbrows
  870:         self.thumbcols = thumbcols
  871:         self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
  872:         if RESPONSE is not None:
  873:             RESPONSE.redirect('manage_main')
  874:     
  875:     
  876:         
  877: def manage_AddDocumentViewerForm(self):
  878:     """add the viewer form"""
  879:     pt=PageTemplateFile('zpt/addDocumentViewer', globals()).__of__(self)
  880:     return pt()
  881:   
  882: def manage_AddDocumentViewer(self,id,imageScalerUrl="",textServerName="",title="",RESPONSE=None):
  883:     """add the viewer"""
  884:     newObj=documentViewer(id,imageScalerUrl=imageScalerUrl,title=title,textServerName=textServerName)
  885:     self._setObject(id,newObj)
  886:     
  887:     if RESPONSE is not None:
  888:         RESPONSE.redirect('manage_main')
  889: 
  890: 
  891: ##
  892: ## DocumentViewerTemplate class
  893: ##
class DocumentViewerTemplate(ZopePageTemplate):
    """Template for document viewer"""
    # meta_type is the label Zope shows for this object type in the ZMI
    meta_type="DocumentViewer Template"
  897: 
  898: 
  899: def manage_addDocumentViewerTemplateForm(self):
  900:     """Form for adding"""
  901:     pt=PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
  902:     return pt()
  903: 
  904: def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
  905:                            REQUEST=None, submit=None):
  906:     "Add a Page Template with optional file content."
  907: 
  908:     self._setObject(id, DocumentViewerTemplate(id))
  909:     ob = getattr(self, id)
  910:     txt=file(os.path.join(package_home(globals()),'zpt/viewer_main.zpt'),'r').read()
  911:     logging.info("txt %s:"%txt)
  912:     ob.pt_edit(txt,"text/html")
  913:     if title:
  914:         ob.pt_setTitle(title)
  915:     try:
  916:         u = self.DestinationURL()
  917:     except AttributeError:
  918:         u = REQUEST['URL1']
  919:         
  920:     u = "%s/%s" % (u, urllib.quote(id))
  921:     REQUEST.RESPONSE.redirect(u+'/manage_main')
  922:     return ''
  923: 
  924: 
  925:     

# FreeBSD-CVSweb <freebsd-cvsweb@FreeBSD.org>  (CVSweb page footer; not part of the module)