File:  [Repository] / documentViewer / documentViewer.py
Revision 1.36
Tue Nov 4 20:36:50 2008 UTC by dwinter
Branches: MAIN
CVS tags: HEAD
Log message: marks

    1: 
    2: 
    3: from OFS.Folder import Folder
    4: from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
    5: from Products.PageTemplates.PageTemplateFile import PageTemplateFile 
    6: from AccessControl import ClassSecurityInfo
    7: from AccessControl import getSecurityManager
    8: from Globals import package_home
    9: 
   10: from Ft.Xml.Domlette import NonvalidatingReader
   11: from Ft.Xml.Domlette import PrettyPrint, Print
   12: from Ft.Xml import EMPTY_NAMESPACE, Parse
   13: 
   14: import Ft.Xml.XPath
   15: 
   16: import os.path
   17: import sys
   18: import cgi
   19: import urllib
   20: import logging
   21: import math
   22: 
   23: import urlparse 
   24: 
   25: def logger(txt,method,txt2):
   26:     """log txt2 at the given logging level (method), prefixed with txt"""
   27:     logging.log(method, "%s %s"%(txt,txt2))
   28:     
   29:     
   30: def getInt(number, default=0):
   31:     """always returns an int (the default value in case of problems)"""
   32:     try:
   33:         return int(number)
   34:     except:
   35:         return int(default)
   36: 
   37: def getTextFromNode(nodename):
   38:     """get the cdata content of a node"""
   39:     if nodename is None:
   40:         return ""
   41:     nodelist=nodename.childNodes
   42:     rc = ""
   43:     for node in nodelist:
   44:         if node.nodeType == node.TEXT_NODE:
   45:            rc = rc + node.data
   46:     return rc
   47: 
   48:         
   49: def getParentDir(path):
   50:     """returns pathname shortened by one"""
   51:     return '/'.join(path.split('/')[0:-1])
   52:         
   53: 
   54: import socket
   55: 
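       # Note: socket.setdefaulttimeout() changes the timeout for *all* new sockets
       # in this process; the previous value is not restored but reset to a fixed 5 seconds.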
   56: def urlopen(url,timeout=2):
   57:         """urlopen with a timeout"""
   58:         socket.setdefaulttimeout(timeout)
   59:         ret=urllib.urlopen(url)
   60:         socket.setdefaulttimeout(5)
   61:         return ret
   62: 
   63: 
   64: ##
   65: ## documentViewer class
   66: ##
   67: class documentViewer(Folder):
   68:     """document viewer"""
   69:     #textViewerUrl="http://127.0.0.1:8080/HFQP/testXSLT/getPage?"
   70:     
   71:     meta_type="Document viewer"
   72:     
   73:     security=ClassSecurityInfo()
   74:     manage_options=Folder.manage_options+(
   75:         {'label':'main config','action':'changeDocumentViewerForm'},
   76:         )
   77: 
   78:     # templates and forms
   79:     viewer_main = PageTemplateFile('zpt/viewer_main', globals())
   80:     thumbs_main = PageTemplateFile('zpt/thumbs_main', globals())
   81:     image_main = PageTemplateFile('zpt/image_main', globals())
   82:     head_main = PageTemplateFile('zpt/head_main', globals())
   83:     docuviewer_css = PageTemplateFile('css/docuviewer.css', globals())
   84:     info_xml = PageTemplateFile('zpt/info_xml', globals())
   85: 
   86:     thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())
   87:     security.declareProtected('View management screens','changeDocumentViewerForm')    
   88:     changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())
   89: 
   90:     
   91:     def __init__(self,id,imageViewerUrl,textViewerUrl=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=10,authgroups="mpiwg"):
   92:         """init document viewer"""
   93:         self.id=id
   94:         self.title=title
   95:         self.imageViewerUrl=imageViewerUrl
   96:         self.textViewerUrl=textViewerUrl
   97:         
   98:         if not digilibBaseUrl:
   99:             self.digilibBaseUrl = self.findDigilibUrl()
  100:         else:
  101:             self.digilibBaseUrl = digilibBaseUrl
  102:         self.thumbcols = thumbcols
  103:         self.thumbrows = thumbrows
  104:         # authgroups is list of authorized groups (delimited by ,)
  105:         self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
  106:         # add template folder so we can always use template.something
  107:         self.manage_addFolder('template')
  108: 
  109: 
  110:     security.declareProtected('View','thumbs_rss')
  111:     def thumbs_rss(self,mode,url,viewMode="auto",start=None,pn=1):
  112:         '''
  113:         return the thumbnail view of the document as RSS
  114:         @param mode: defines how to access the document behind url
  115:         @param url: url which contains display information
  116:         @param viewMode: text, images or auto (default); auto selects text if a text version is configured, otherwise images
  117:         
  118:         '''
  119:         logging.debug("documentViewer (thumbs_rss) loading rss")
  120:         logger("documentViewer (thumbs_rss)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
  121:         
  122:         if not hasattr(self, 'template'):
  123:             # create template folder if it doesn't exist
  124:             self.manage_addFolder('template')
  125:             
  126:         if not self.digilibBaseUrl:
  127:             self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
  128:             
  129:         docinfo = self.getDocinfo(mode=mode,url=url)
  130:         pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
  131:         pt = getattr(self.template, 'thumbs_main_rss')
  132:         
  133:         if viewMode=="auto": # auto mode selected
  134:             if docinfo.get("textURL",'') and self.textViewerUrl: # textURL set and textViewer configured
  135:                 viewMode="text"
  136:             else:
  137:                 viewMode="images"
  138:                
  139:         return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
  140:   
  141:     security.declareProtected('View','index_html')
  142:     def index_html(self,mode,url,viewMode="auto",start=None,pn=1,mk=None):
  143:         '''
  144:         main view of the document viewer
  145:         @param mode: defines how to access the document behind url
  146:         @param url: url which contains display information
  147:         @param viewMode: text, images or auto (default); auto selects text if a text version is configured, otherwise images
  148:         @param mk: optional list of marks
  149:         '''
  150:         
  151:         logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
  152:         
  153:         if not hasattr(self, 'template'):
  154:             # create template folder if it doesn't exist
  155:             self.manage_addFolder('template')
  156:             
  157:         if not self.digilibBaseUrl:
  158:             self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
  159:             
  160:         docinfo = self.getDocinfo(mode=mode,url=url)
  161:         pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
  162:         pt = getattr(self.template, 'viewer_main')
  163:         
  164:         if viewMode=="auto": # auto mode selected
  165:             if docinfo.get("textURL",'') and self.textViewerUrl: # textURL set and textViewer configured
  166:                 viewMode="text"
  167:             else:
  168:                 viewMode="images"
  169:                
  170:         return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,marks=self.generateMarks(mk))
  171:   
  172:     def generateMarks(self,mk):
  173:         """build the query fragment for the mk (mark) parameters"""
  174:         if not mk:
  175:             return ""
  176:         return "&".join(["mk=%s"%m for m in mk])
  177:     
  178:     def getLink(self,param=None,val=None):
  179:         """link to documentviewer with parameter param set to val"""
  180:         params=self.REQUEST.form.copy()
  181:         if param is not None:
  182:             if val is None:
  183:                 if params.has_key(param):
  184:                     del params[param]
  185:             else:
  186:                 params[param] = str(val)
  187:                 
  188:         # quote values and assemble into query string
  189:         ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
  190:         url=self.REQUEST['URL1']+"?"+ps
  191:         return url
  192: 
  193:     def getLinkAmp(self,param=None,val=None):
  194:         """link to documentviewer with parameter param set to val"""
  195:         params=self.REQUEST.form.copy()
  196:         if param is not None:
  197:             if val is None:
  198:                 if params.has_key(param):
  199:                     del params[param]
  200:             else:
  201:                 params[param] = str(val)
  202:                 
  203:         # quote values and assemble into query string (&amp; so the URL can be embedded in XML/RSS)
  204:         logging.debug("documentViewer (getLinkAmp) params: %s"%repr(params.items()))
  205:         ps = "&amp;".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
  206:         url=self.REQUEST['URL1']+"?"+ps
  207:         return url
  208:     def getInfo_xml(self,url,mode):
  209:         """returns info about the document as XML"""
  210: 
  211:         if not self.digilibBaseUrl:
  212:             self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
  213:         
  214:         docinfo = self.getDocinfo(mode=mode,url=url)
  215:         pt = getattr(self.template, 'info_xml')
  216:         return pt(docinfo=docinfo)
  217: 
  218:     
  219:     def getStyle(self, idx, selected, style=""):
  220:         """returns the given style string, with 'sel' appended if idx == selected."""
  221:         #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
  222:         if idx == selected:
  223:             return style + 'sel'
  224:         else:
  225:             return style
  226: 
  227:         
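           # Access model (as implemented below): 'free' resources are visible to everyone;
           # resources with no access type or one of the configured authgroups require any
           # logged-in (non-anonymous) user; all other access types are denied.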
  228:     def isAccessible(self, docinfo):
  229:         """returns if access to the resource is granted"""
  230:         access = docinfo.get('accessType', None)
  231:         logger("documentViewer (accessOK)", logging.INFO, "access type %s"%access)
  232:         if access is not None and access == 'free':
  233:             logger("documentViewer (accessOK)", logging.INFO, "access is free")
  234:             return True
  235:         elif access is None or access in self.authgroups:
  236:             # only local access -- only logged in users
  237:             user = getSecurityManager().getUser()
  238:             if user is not None:
  239:                 #print "user: ", user
  240:                 return (user.getUserName() != "Anonymous User")
  241:             else:
  242:                 return False
  243:         
  244:         logger("documentViewer (accessOK)", logging.INFO, "unknown access type %s"%access)
  245:         return False
  246:     
  247:                 
  248:     def getDirinfoFromDigilib(self,path,docinfo=None,cut=0):
  249:         """gets directory info (number of pages) for path from digilib's dirInfo"""
  250:         num_retries = 3
  251:         if docinfo is None:
  252:             docinfo = {}
  253:         
  254:         for x in range(cut):
  255:                 path=getParentDir(path)
  256:         infoUrl=self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
  257:     
  258:         logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo from %s"%(infoUrl))
  259:         
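               # retry loop: 'break' on success; the 'else' clause of the 'for' runs only
               # when all retries failed without a break, and then raises IOError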
  260:         for cnt in range(num_retries):
  261:             try:
  262:                 # dom = NonvalidatingReader.parseUri(imageUrl)
  263:                 txt=urllib.urlopen(infoUrl).read()
  264:                 dom = Parse(txt)
  265:                 break
  266:             except:
  267:                 logger("documentViewer (getdirinfofromdigilib)", logging.ERROR, "error reading %s (try %d)"%(infoUrl,cnt))
  268:         else:
  269:             raise IOError("Unable to get dir-info from %s"%(infoUrl))
  270:         
  271:         sizes=dom.xpath("//dir/size")
  272:         logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo:size=%s"%sizes)
  273:         
  274:         if sizes:
  275:             docinfo['numPages'] = int(getTextFromNode(sizes[0]))
  276:         else:
  277:             docinfo['numPages'] = 0
  278:                         
  279:         return docinfo
  280:     
  281:             
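           # For plain online paths the index.meta file is fetched through digilib's
           # Texter servlet; full http URLs are used as-is.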
  282:     def getIndexMeta(self, url):
  283:         """returns dom of index.meta document at url"""
  284:         num_retries = 3
  285:         dom = None
  286:         metaUrl = None
  287:         if url.startswith("http://"):
  288:             # real URL
  289:             metaUrl = url
  290:         else:
  291:             # online path
  292:             server=self.digilibBaseUrl+"/servlet/Texter?fn="
  293:             metaUrl=server+url.replace("/mpiwg/online","")
  294:             if not metaUrl.endswith("index.meta"):
  295:                 metaUrl += "/index.meta"
  296:         logging.debug("documentViewer (getIndexMeta) metaUrl: %s"%metaUrl)
  297:         for cnt in range(num_retries):
  298:             try:
  299:                 # patch (dirk): read the text first, then parse it, so that encoding errors no longer occur
  300:                 # dom = NonvalidatingReader.parseUri(metaUrl)
  301:                 txt=urllib.urlopen(metaUrl).read()
  302:                 dom = Parse(txt)
  303:                 break
  304:             except:
  305:                 logger("documentViewer (getIndexMeta)", logging.ERROR,"%s (%s)"%sys.exc_info()[0:2])
  306:                 
  307:         if dom is None:
  308:             raise IOError("Unable to read index meta from %s"%(url))
  309:                  
  310:         return dom
  311:     
  312:     def getPresentationInfoXML(self, url):
  313:         """returns dom of info.xml document at url"""
  314:         num_retries = 3
  315:         dom = None
  316:         metaUrl = None
  317:         if url.startswith("http://"):
  318:             # real URL
  319:             metaUrl = url
  320:         else:
  321:             # online path
  322:             server=self.digilibBaseUrl+"/servlet/Texter?fn="
  323:             metaUrl=server+url.replace("/mpiwg/online","")
  324:            
  325:         
  326:         for cnt in range(num_retries):
  327:             try:
  328:                 # patch (dirk): read the text first, then parse it, so that encoding errors no longer occur
  329:                 # dom = NonvalidatingReader.parseUri(metaUrl)
  330:                 txt=urllib.urlopen(metaUrl).read()
  331:                 dom = Parse(txt)
  332:                 break
  333:             except:
  334:                 logger("documentViewer (getPresentationInfoXML)", logging.ERROR,"%s (%s)"%sys.exc_info()[0:2])
  335:                 
  336:         if dom is None:
  337:             raise IOError("Unable to read info.xml from %s"%(url))
  338:                  
  339:         return dom
  340:                         
  341:         
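           # Note: the index.meta helpers below walk 'cut+1' directories up (one more than
           # getDirinfoFromDigilib), since index.meta is expected one level above the image
           # directory (cf. getDocinfoFromImagePath).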
  342:     def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
  343:         """gets authorization info from the index.meta file at path or given by dom"""
  344:         logger("documentViewer (getauthinfofromindexmeta)", logging.INFO,"path: %s"%(path))
  345:         
  346:         access = None
  347:         
  348:         if docinfo is None:
  349:             docinfo = {}
  350:             
  351:         if dom is None:
  352:             for x in range(cut+1):
  353:                 path=getParentDir(path)
  354:             dom = self.getIndexMeta(path)
  355:        
  356:         acctype = dom.xpath("//access-conditions/access/@type")
  357:         if acctype and (len(acctype)>0):
  358:             access=acctype[0].value
  359:             if access in ['group', 'institution']:
  360:                 access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
  361:             
  362:         docinfo['accessType'] = access
  363:         return docinfo
  364:     
  365:         
  366:     def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
  367:         """gets bibliographical info from the index.meta file at path or given by dom"""
  368:         logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))
  369:         
  370:         if docinfo is None:
  371:             docinfo = {}
  372:             
  373:         if dom is None:
  374:             for x in range(cut+1):
  375:                 path=getParentDir(path)
  376:             dom = self.getIndexMeta(path)
  377:             
  378:         # put in all raw bib fields as dict "bib"
  379:         bib = dom.xpath("//bib/*")
  380:         if bib and len(bib)>0:
  381:             bibinfo = {}
  382:             for e in bib:
  383:                 bibinfo[e.localName] = getTextFromNode(e)
  384:             docinfo['bib'] = bibinfo
  385:         
  386:         # extract some fields (author, title, year) according to their mapping
  387:         metaData=self.metadata.main.meta.bib
  388:         bibtype=dom.xpath("//bib/@type")
  389:         if bibtype and (len(bibtype)>0):
  390:             bibtype=bibtype[0].value
  391:         else:
  392:             bibtype="generic"
  393:             
  394:         bibtype=bibtype.replace("-"," ") # some index.meta files use "-" instead of " " in the type (not wrong! ROC)
  395:         docinfo['bib_type'] = bibtype
  396:         bibmap=metaData.generateMappingForType(bibtype)
  397:         # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
  398:         if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
  399:             try:
  400:                 docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
  401:             except: pass
  402:             try:
  403:                 docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
  404:             except: pass
  405:             try:
  406:                 docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
  407:             except: pass
  408:             logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
  409:             try:
  410:                 docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
  411:             except:
  412:                 docinfo['lang']=''
  413: 
  414:         return docinfo
  415: 
  416:         
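           # The texttool block of index.meta can provide an image directory (texttool/image),
           # a digilib viewer prefix (texttool/digiliburlprefix), a fulltext URL (texttool/text)
           # and a presentation info file (texttool/presentation); getDocinfoFromTextTool
           # collects these below.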
  417:     def getDocinfoFromTextTool(self,url,dom=None,docinfo=None):
  418:        """parse texttool tag in index meta"""
  419:        logger("documentViewer (getdocinfofromtexttool)", logging.INFO,"url: %s"%(url))
  420:        if docinfo is None:
  421:            docinfo = {}
  422:            
  423:        if docinfo.get('lang',None) is None:
  424:            docinfo['lang']='' # no language set by default
  425:        if dom is None:
  426:            dom = self.getIndexMeta(url)
  427:        
  428:        archivePath = None
  429:        archiveName = None
  430: 
  431:        archiveNames=dom.xpath("//resource/name")
  432:        if archiveNames and (len(archiveNames)>0):
  433:            archiveName=getTextFromNode(archiveNames[0])
  434:        else:
  435:            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING,"resource/name missing in: %s"%(url))
  436:        
  437:        archivePaths=dom.xpath("//resource/archive-path")
  438:        if archivePaths and (len(archivePaths)>0):
  439:            archivePath=getTextFromNode(archivePaths[0])
  440:            # clean up archive path
  441:            if archivePath[0] != '/':
  442:                archivePath = '/' + archivePath
  443:            if archiveName and (not archivePath.endswith(archiveName)):
  444:                archivePath += "/" + archiveName
  445:        else:
  446:            # try to get archive-path from url
  447:            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING,"resource/archive-path missing in: %s"%(url))
  448:            if (not url.startswith('http')):
  449:                archivePath = url.replace('index.meta', '')
  450:                
  451:        if archivePath is None:
  452:            # we balk without archive-path
  453:            raise IOError("Missing archive-path (for text-tool) in %s"%(url))
  454:        
  455:        imageDirs=dom.xpath("//texttool/image")
  456:        if imageDirs and (len(imageDirs)>0):
  457:            imageDir=getTextFromNode(imageDirs[0])
  458:        else:
  459:            # no image tag: this is no longer an error, because text mode is now the standard
  460:            #raise IOError("No text-tool info in %s"%(url))
  461:            imageDir=""
  462:            docinfo['numPages']=1 # just set to one for now; navigation via the thumbnails will of course not work
  463:        
  464:            docinfo['imagePath'] = "" # no images
  465:            docinfo['imageURL'] = ""
  466: 
  467:        if imageDir and archivePath:
  468:            #print "image: ", imageDir, " archivepath: ", archivePath
  469:            imageDir=os.path.join(archivePath,imageDir)
  470:            imageDir=imageDir.replace("/mpiwg/online",'')
  471:            docinfo=self.getDirinfoFromDigilib(imageDir,docinfo=docinfo)
  472:            docinfo['imagePath'] = imageDir
  473:            docinfo['imageURL'] = self.digilibBaseUrl+"/servlet/Scaler?fn="+imageDir
  474:            
  475:        viewerUrls=dom.xpath("//texttool/digiliburlprefix")
  476:        if viewerUrls and (len(viewerUrls)>0):
  477:            viewerUrl=getTextFromNode(viewerUrls[0])
  478:            docinfo['viewerURL'] = viewerUrl
  479:                   
  480:        textUrls=dom.xpath("//texttool/text")
  481:        if textUrls and (len(textUrls)>0):
  482:            textUrl=getTextFromNode(textUrls[0])
  483:            if urlparse.urlparse(textUrl)[0]=="": # not a full URL, just a relative path
  484:                textUrl=os.path.join(archivePath,textUrl) 
  485:            # fix URLs starting with /mpiwg/online
  486:            if textUrl.startswith("/mpiwg/online"):
  487:                textUrl = textUrl.replace("/mpiwg/online",'',1)
  488:            
  489:            docinfo['textURL'] = textUrl
  490:    
  491:        presentationUrls=dom.xpath("//texttool/presentation")
  492:        docinfo = self.getBibinfoFromIndexMeta(url,docinfo=docinfo,dom=dom)   # get info von bib tag
  493:        
  494:        if presentationUrls and (len(presentationUrls)>0): # overwrite the bib info with the presentation information
  495:             # the presentation url is obtained by replacing index.meta in the metadata url
  496:             # with the relative path to the presentation info file
  497:            presentationUrl=url.replace('index.meta',getTextFromNode(presentationUrls[0]))
  498:            docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl,docinfo=docinfo,dom=dom)
  499: 
  500:        docinfo = self.getAuthinfoFromIndexMeta(url,docinfo=docinfo,dom=dom)   # get access info
  501:        return docinfo
  502:    
  503:    
  504:     def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None):
  505:         """gets the bibliographical information from the presentation entry in texttool
  506:         """
  507:         dom=self.getPresentationInfoXML(url)
  508:         try:
  509:             docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
  510:         except:
  511:             pass
  512:         try:
  513:             docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
  514:         except:
  515:             pass
  516:         try:
  517:             docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
  518:         except:
  519:             pass
  520:         return docinfo
  521:     
  522:     def getDocinfoFromImagePath(self,path,docinfo=None,cut=0):
  523:         """path is the path to the images; it assumes that the index.meta file is one level higher."""
  524:         logger("documentViewer (getdocinfofromimagepath)", logging.INFO,"path: %s"%(path))
  525:         if docinfo is None:
  526:             docinfo = {}
  527:         path=path.replace("/mpiwg/online","")
  528:         docinfo['imagePath'] = path
  529:         docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo,cut=cut)
  530:         imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path
  531:         docinfo['imageURL'] = imageUrl
  532:         
  533:         docinfo = self.getBibinfoFromIndexMeta(path,docinfo=docinfo,cut=cut)
  534:         docinfo = self.getAuthinfoFromIndexMeta(path,docinfo=docinfo,cut=cut)
  535:         return docinfo
  536:     
  537:     
  538:     def getDocinfo(self, mode, url):
  539:         """returns docinfo depending on mode"""
  540:         logger("documentViewer (getdocinfo)", logging.INFO,"mode: %s, url: %s"%(mode,url))
  541:         # look for cached docinfo in session
  542:         if self.REQUEST.SESSION.has_key('docinfo'):
  543:             docinfo = self.REQUEST.SESSION['docinfo']
  544:             # check if it's still current
  545:             if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url:
  546:                 logger("documentViewer (getdocinfo)", logging.INFO,"docinfo in session: %s"%docinfo)
  547:                 return docinfo
  548:         # new docinfo
  549:         docinfo = {'mode': mode, 'url': url}
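               # supported modes: 'texttool' (index.meta with a texttool tag), 'imagepath'
               # (direct path to an image directory) and 'filepath' (presumably a path to a
               # single file; two path levels are stripped, hence cut=2)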
  550:         if mode=="texttool": #index.meta with texttool information
  551:             docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo)
  552:         elif mode=="imagepath":
  553:             docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo)
  554:         elif mode=="filepath":
  555:             docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=2)
  556:         else:
  557:             logger("documentViewer (getdocinfo)", logging.ERROR,"unknown mode!")
  558:             raise ValueError("Unknown mode %s"%(mode))
  559:                         
  560:         logger("documentViewer (getdocinfo)", logging.INFO,"docinfo: %s"%docinfo)
  561:         self.REQUEST.SESSION['docinfo'] = docinfo
  562:         return docinfo
  563:         
  564:         
  565:     def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None):
  566:         """returns pageinfo with the given parameters"""
  567:         pageinfo = {}
  568:         current = getInt(current)
  569:         pageinfo['current'] = current
  570:         rows = int(rows or self.thumbrows)
  571:         pageinfo['rows'] = rows
  572:         cols = int(cols or self.thumbcols)
  573:         pageinfo['cols'] = cols
  574:         grpsize = cols * rows
  575:         pageinfo['groupsize'] = grpsize
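               # default start is the first page of the thumbnail group containing 'current',
               # e.g. grpsize=20 and current=27 -> ceil(27/20)*20 - 19 = 21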
  576:         start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1)))
  577:         # int(current / grpsize) * grpsize +1))
  578:         pageinfo['start'] = start
  579:         pageinfo['end'] = start + grpsize
  580:         if docinfo is not None:
  581:             np = int(docinfo['numPages'])
  582:             pageinfo['end'] = min(pageinfo['end'], np)
  583:             pageinfo['numgroups'] = int(np / grpsize)
  584:             if np % grpsize > 0:
  585:                 pageinfo['numgroups'] += 1
  586:                 
  587:         return pageinfo
  588:                 
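           # text() parses the fulltext, locates the pn-th and (pn+1)-th page-break (pb)
           # elements, walks up to their enclosing <p> elements and collects the sibling
           # nodes in between.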
  589:     def text(self,mode,url,pn):
  590:         """get the text of page pn"""
  591:         if mode=="texttool": #index.meta with texttool information
  592:             (viewerUrl,imagepath,textpath)=parseUrlTextTool(url)
  593:         
  594:         #print textpath
  595:         try:
  596:             dom = NonvalidatingReader.parseUri(textpath)
  597:         except:
  598:             return None
  599:     
  600:         list=[]
  601:         nodes=dom.xpath("//pb")
  602: 
  603:         node=nodes[int(pn)-1]
  604:         
  605:         p=node
  606:         
  607:         while p.tagName!="p":
  608:             p=p.parentNode
  609:         
  610:         
  611:         endNode=nodes[int(pn)]
  612:         
  613:         
  614:         e=endNode
  615:         
  616:         while e.tagName!="p":
  617:             e=e.parentNode
  618:         
  619:         
  620:         next=node.parentNode
  621:         
  622:         # collect the sibling nodes between the start and end page breaks
  623:         while next and (next!=endNode.parentNode):
  624:             list.append(next)    
  625:             next=next.nextSibling    
  626:         list.append(endNode.parentNode)
  627:         
  628:         if p==e: # both page breaks are in the same paragraph
  629:             pass
  630: #    else:
  631: #            next=p
  632: #            while next!=e:
  633: #                print next,e
  634: #                list.append(next)
  635: #                next=next.nextSibling
  636: #            
  637: #        for x in list:
  638: #            PrettyPrint(x)
  639: #
  640: #        return list
  641: #
  642: 
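           # findDigilibUrl asks the configured zogilib instance for its Scaler URL
           # (via getScalerUrl) and strips "/servlet/Scaler?" to obtain the digilib base URL.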
  643:     def findDigilibUrl(self):
  644:         """try to get the digilib URL from zogilib"""
  645:         url = self.imageViewerUrl[:-1] + "/getScalerUrl"
  646:         #print urlparse.urlparse(url)[0]
  647:         #print urlparse.urljoin(self.absolute_url(),url)
  648:         logging.info("finddigiliburl: %s"%urlparse.urlparse(url)[0])
  649:         logging.info("finddigiliburl: %s"%urlparse.urljoin(self.absolute_url(),url))
  650:         
  651:         try:
  652:             if urlparse.urlparse(url)[0]=='': #relative path
  653:                 url=urlparse.urljoin(self.absolute_url()+"/",url)
  654:                 
  655:             scaler = urlopen(url).read()
  656:             return scaler.replace("/servlet/Scaler?", "")
  657:         except:
  658:             return None
  659:     
  660:     def changeDocumentViewer(self,imageViewerUrl,textViewerUrl,title="",digilibBaseUrl=None,thumbrows=10,thumbcols=2,authgroups='mpiwg',RESPONSE=None):
  661:         """change the document viewer settings"""
  662:         self.title=title
  663:         self.imageViewerUrl=imageViewerUrl
  664:         self.textViewerUrl=textViewerUrl
  665:         self.digilibBaseUrl = digilibBaseUrl
  666:         self.thumbrows = thumbrows
  667:         self.thumbcols = thumbcols
  668:         self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
  669:         if RESPONSE is not None:
  670:             RESPONSE.redirect('manage_main')
  671:     
  672:     
  673:         
  674:         
  675: #    security.declareProtected('View management screens','renameImageForm')
  676: 
  677: def manage_AddDocumentViewerForm(self):
  678:     """add the viewer form"""
  679:     pt=PageTemplateFile('zpt/addDocumentViewer', globals()).__of__(self)
  680:     return pt()
  681:   
  682: def manage_AddDocumentViewer(self,id,imageViewerUrl="",textViewerUrl="",title="",RESPONSE=None):
  683:     """add the viewer"""
  684:     newObj=documentViewer(id,imageViewerUrl,title=title,textViewerUrl=textViewerUrl)
  685:     self._setObject(id,newObj)
  686:     
  687:     if RESPONSE is not None:
  688:         RESPONSE.redirect('manage_main')
  689: 
  690: 
  691: ##
  692: ## DocumentViewerTemplate class
  693: ##
  694: class DocumentViewerTemplate(ZopePageTemplate):
  695:     """Template for document viewer"""
  696:     meta_type="DocumentViewer Template"
  697: 
  698: 
  699: def manage_addDocumentViewerTemplateForm(self):
  700:     """Form for adding"""
  701:     pt=PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
  702:     return pt()
  703: 
  704: def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
  705:                            REQUEST=None, submit=None):
  706:     "Add a Page Template with optional file content."
  707: 
  708:     self._setObject(id, DocumentViewerTemplate(id))
  709:     ob = getattr(self, id)
  710:     txt=file(os.path.join(package_home(globals()),'zpt/viewer_main.zpt'),'r').read()
  711:     logging.info("txt %s:"%txt)
  712:     ob.pt_edit(txt,"text/html")
  713:     if title:
  714:         ob.pt_setTitle(title)
  715:     try:
  716:         u = self.DestinationURL()
  717:     except AttributeError:
  718:         u = REQUEST['URL1']
  719:         
  720:     u = "%s/%s" % (u, urllib.quote(id))
  721:     REQUEST.RESPONSE.redirect(u+'/manage_main')
  722:     return ''
  723: 
  724: 
  725:     
