File: documentViewer/documentViewer.py
Revision 1.45
Mon Apr 19 13:13:13 2010 UTC by abukhman
Branches: MAIN
CVS tags: HEAD
Last update with search function (getSearch)


from OFS.Folder import Folder
from Products.PageTemplates.ZopePageTemplate import ZopePageTemplate
from Products.PageTemplates.PageTemplateFile import PageTemplateFile 
from AccessControl import ClassSecurityInfo
from AccessControl import getSecurityManager
from Globals import package_home

from Ft.Xml.Domlette import NonvalidatingReader
from Ft.Xml.Domlette import PrettyPrint, Print
from Ft.Xml import EMPTY_NAMESPACE, Parse


import Ft.Xml.XPath
import cStringIO
import xmlrpclib
import os.path
import sys
import cgi
import urllib
import logging
import math

import urlparse 
from types import *

def logger(txt, method, txt2):
    """logging helper: logs txt2 at the given level, prefixed with txt"""
    logging.log(method, "%s: %s" % (txt, txt2))
    
    
def getInt(number, default=0):
    """returns always an int (0 in case of problems)"""
    try:
        return int(number)
    except (ValueError, TypeError):
        return int(default)
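# e.g. getInt("12") == 12, getInt(None) == 0, getInt("abc", default=5) == 5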

def getTextFromNode(nodename):
    """get the cdata content of a node"""
    if nodename is None:
        return ""
    nodelist=nodename.childNodes
    rc = ""
    for node in nodelist:
        if node.nodeType == node.TEXT_NODE:
            rc = rc + node.data
    return rc

def serializeNode(node, encoding='utf-8'):
    """returns a string containing node as XML"""
    buf = cStringIO.StringIO()
    Print(node, stream=buf, encoding=encoding)
    s = buf.getvalue()
    buf.close()
    return s

        
def getParentDir(path):
    """returns pathname shortened by one"""
    return '/'.join(path.split('/')[0:-1])
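# e.g. getParentDir("/mpiwg/online/permanent/foo") == "/mpiwg/online/permanent" (illustrative path)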
        

import socket

def urlopen(url, timeout=2):
    """urlopen with timeout"""
    # note: setdefaulttimeout changes the process-wide socket timeout; it is reset to 5s afterwards
    socket.setdefaulttimeout(timeout)
    ret = urllib.urlopen(url)
    socket.setdefaulttimeout(5)
    return ret


##
## documentViewer class
##
class documentViewer(Folder):
    """document viewer"""
    #textViewerUrl="http://127.0.0.1:8080/HFQP/testXSLT/getPage?"
    
    meta_type="Document viewer"
    
    security=ClassSecurityInfo()
    manage_options=Folder.manage_options+(
        {'label':'main config','action':'changeDocumentViewerForm'},
        )

    # templates and forms
    viewer_main = PageTemplateFile('zpt/viewer_main', globals())
    toc_thumbs = PageTemplateFile('zpt/toc_thumbs', globals())
    toc_text = PageTemplateFile('zpt/toc_text', globals())
    toc_figures = PageTemplateFile('zpt/toc_figures', globals())
    page_main_images = PageTemplateFile('zpt/page_main_images', globals())
    page_main_text = PageTemplateFile('zpt/page_main_text', globals())
    page_main_text_dict = PageTemplateFile('zpt/page_main_text_dict', globals())
    head_main = PageTemplateFile('zpt/head_main', globals())
    docuviewer_css = PageTemplateFile('css/docuviewer.css', globals())
    info_xml = PageTemplateFile('zpt/info_xml', globals())

    thumbs_main_rss = PageTemplateFile('zpt/thumbs_main_rss', globals())
    security.declareProtected('View management screens','changeDocumentViewerForm')    
    changeDocumentViewerForm = PageTemplateFile('zpt/changeDocumentViewer', globals())

    
    def __init__(self,id,imageScalerUrl=None,textServerName=None,title="",digilibBaseUrl=None,thumbcols=2,thumbrows=5,authgroups="mpiwg"):
        """init document viewer"""
        self.id=id
        self.title=title
        self.thumbcols = thumbcols
        self.thumbrows = thumbrows
        # authgroups is list of authorized groups (delimited by ,)
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
        # create template folder so we can always use template.something
        
        templateFolder = Folder('template')
        #self['template'] = templateFolder # Zope-2.12 style
        self._setObject('template',templateFolder) # old style
        try:
            from Products.XMLRpcTools.XMLRpcTools import XMLRpcServerProxy
            xmlRpcClient = XMLRpcServerProxy(id='fulltextclient', serverUrl=textServerName, use_xmlrpc=False)
            #templateFolder['fulltextclient'] = xmlRpcClient
            templateFolder._setObject('fulltextclient',xmlRpcClient)
        except Exception, e:
            logging.error("Unable to create XMLRpcTools for fulltextclient: "+str(e))
        try:
            from Products.zogiLib.zogiLib import zogiLib
            zogilib = zogiLib(id="zogilib", title="zogilib for docuviewer", dlServerURL=imageScalerUrl, layout="book")
            #templateFolder['zogilib'] = zogilib
            templateFolder._setObject('zogilib',zogilib)
        except Exception, e:
            logging.error("Unable to create zogiLib for zogilib: "+str(e))
        

    security.declareProtected('View','thumbs_rss')
    def thumbs_rss(self,mode,url,viewMode="auto",start=None,pn=1):
        '''
        view it
        @param mode: defines how to access the document behind url 
        @param url: url which contains display information
        @param viewMode: if images display images, if text display text, default is auto (text, images or auto)
        
        '''
        logging.debug("HHHHHHHHHHHHHH:load the rss")
        logger("documentViewer (index)", logging.INFO, "mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
        
        if not hasattr(self, 'template'):
            # create template folder if it doesn't exist
            self.manage_addFolder('template')
            
        if not getattr(self, 'digilibBaseUrl', None):
            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
            
        docinfo = self.getDocinfo(mode=mode,url=url)
        pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo)
        pt = getattr(self.template, 'thumbs_main_rss')
        
        if viewMode=="auto": # automodus gewaehlt
            if docinfo.get("textURL",'') and self.textViewerUrl: #texturl gesetzt und textViewer konfiguriert
                viewMode="text"
            else:
                viewMode="images"
               
        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode)
  
    security.declareProtected('View','index_html')
    def index_html(self,url,mode="texttool",viewMode="auto",tocMode="thumbs",start=None,pn=1,mk=None, query=None, querySearch=None):
        '''
        view it
        @param mode: defines how to access the document behind url 
        @param url: url which contains display information
        @param viewMode: if images display images, if text display text, default is auto (text,images or auto)
        @param tocMode: type of 'table of contents' for navigation (thumbs, text, figures, search)
        @param querySearch: search mode (fulltext, fulltextMorph, xpath, xquery, ftIndex, ftIndexMorph)
        '''
        
        logging.debug("documentViewer (index) mode: %s url:%s start:%s pn:%s"%(mode,url,start,pn))
        
        if not hasattr(self, 'template'):
            # this won't work
            logging.error("template folder missing!")
            return "ERROR: template folder missing!"
            
        if not getattr(self, 'digilibBaseUrl', None):
            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
            
        docinfo = self.getDocinfo(mode=mode,url=url)
        pageinfo = self.getPageinfo(start=start,current=pn,docinfo=docinfo,viewMode=viewMode,tocMode=tocMode)
        if tocMode != "thumbs":
            # get table of contents
            docinfo = self.getToc(mode=tocMode, docinfo=docinfo)

        if viewMode=="auto": # automodus gewaehlt
            if docinfo.get("textURL",''): #texturl gesetzt und textViewer konfiguriert
                viewMode="text"
            else:
                viewMode="images"
                
        pt = getattr(self.template, 'viewer_main')               
        return pt(docinfo=docinfo,pageinfo=pageinfo,viewMode=viewMode,mk=self.generateMarks(mk))
  
    def generateMarks(self,mk):
        ret=""
        if mk is None:
            return ""
        if type(mk) is not ListType:
            mk = [mk]
        for m in mk:
            if ret:
                ret += "&"
            ret += "mk=%s" % m
        return ret
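    # e.g. generateMarks(["1-2", "3"]) returns "mk=1-2&mk=3" (illustrative values, assuming the '&'-joined form above)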


    def findDigilibUrl(self):
        """try to get the digilib URL from zogilib"""
        url = self.template.zogilib.getDLBaseUrl()
        return url
    
    def getStyle(self, idx, selected, style=""):
        """returns a string with the given style and append 'sel' if path == selected."""
        #logger("documentViewer (getstyle)", logging.INFO, "idx: %s selected: %s style: %s"%(idx,selected,style))
        if idx == selected:
            return style + 'sel'
        else:
            return style
    
    def getLink(self,param=None,val=None):
        """link to documentviewer with parameter param set to val"""
        params=self.REQUEST.form.copy()
        if param is not None:
            if val is None:
                if params.has_key(param):
                    del params[param]
            else:
                params[param] = str(val)
                
        if params.get("mode", None) == "filepath": #wenn beim erst Aufruf filepath gesetzt wurde aendere das nun zu imagepath
                params["mode"] = "imagepath"
                params["url"] = getParentDir(params["url"])
                
        # quote values and assemble into query string
        ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
        url=self.REQUEST['URL1']+"?"+ps
        return url
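    # e.g. getLink('pn', 3) keeps the current request parameters and sets pn=3,
    # producing something like ".../documentViewer?mode=texttool&url=...&pn=3" (illustrative)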

    def getLinkAmp(self,param=None,val=None):
        """link to documentviewer with parameter param set to val"""
        params=self.REQUEST.form.copy()
        if param is not None:
            if val is None:
                if params.has_key(param):
                    del params[param]
            else:
                params[param] = str(val)
                
        # quote values and assemble into query string
        logging.info("XYXXXXX: %s"%repr(params.items()))
        ps = "&".join(["%s=%s"%(k,urllib.quote(v)) for (k, v) in params.items()])
        url=self.REQUEST['URL1']+"?"+ps
        return url
    
    def getInfo_xml(self,url,mode):
        """returns info about the document as XML"""

        if not getattr(self, 'digilibBaseUrl', None):
            self.digilibBaseUrl = self.findDigilibUrl() or "http://nausikaa.mpiwg-berlin.mpg.de/digitallibrary"
        
        docinfo = self.getDocinfo(mode=mode,url=url)
        pt = getattr(self.template, 'info_xml')
        return pt(docinfo=docinfo)

    
    def isAccessible(self, docinfo):
        """returns if access to the resource is granted"""
        access = docinfo.get('accessType', None)
        logger("documentViewer (accessOK)", logging.INFO, "access type %s"%access)
        if access is not None and access == 'free':
            logger("documentViewer (accessOK)", logging.INFO, "access is free")
            return True
        elif access is None or access in self.authgroups:
            # only local access -- only logged in users
            user = getSecurityManager().getUser()
            if user is not None:
                #print "user: ", user
                return (user.getUserName() != "Anonymous User")
            else:
                return False
        
        logger("documentViewer (accessOK)", logging.INFO, "unknown access type %s"%access)
        return False
    
                
    def getDirinfoFromDigilib(self,path,docinfo=None,cut=0):
        """gibt param von dlInfo aus"""
        num_retries = 3
        if docinfo is None:
            docinfo = {}
        
        for x in range(cut):
            path = getParentDir(path)

        infoUrl = self.digilibBaseUrl+"/dirInfo-xml.jsp?mo=dir&fn="+path
    
        logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo from %s"%(infoUrl))
        
        for cnt in range(num_retries):
            try:
                # dom = NonvalidatingReader.parseUri(imageUrl)
                txt=urllib.urlopen(infoUrl).read()
                dom = Parse(txt)
                break
            except:
                logger("documentViewer (getdirinfofromdigilib)", logging.ERROR, "error reading %s (try %d)"%(infoUrl,cnt))
        else:
            raise IOError("Unable to get dir-info from %s"%(infoUrl))
        
        sizes=dom.xpath("//dir/size")
        logger("documentViewer (getparamfromdigilib)", logging.INFO, "dirInfo:size"%sizes)
        
        if sizes:
            docinfo['numPages'] = int(getTextFromNode(sizes[0]))
        else:
            docinfo['numPages'] = 0
            
        # TODO: produce and keep list of image names and numbers
                        
        return docinfo
    
            
    def getIndexMeta(self, url):
        """returns dom of index.meta document at url"""
        num_retries = 3
        dom = None
        metaUrl = None
        if url.startswith("http://"):
            # real URL
            metaUrl = url
        else:
            # online path
            server=self.digilibBaseUrl+"/servlet/Texter?fn="
            metaUrl=server+url.replace("/mpiwg/online","")
            if not metaUrl.endswith("index.meta"):
                metaUrl += "/index.meta"
        logging.debug("METAURL: %s"%metaUrl)
        for cnt in range(num_retries):
            try:
                # patch by dirk: encoding errors no longer occur this way
                # dom = NonvalidatingReader.parseUri(metaUrl)
                txt=urllib.urlopen(metaUrl).read()
                dom = Parse(txt)
                break
            except:
                logger("ERROR documentViewer (getIndexMeta)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
                
        if dom is None:
            raise IOError("Unable to read index meta from %s"%(url))
                 
        return dom
    
    def getPresentationInfoXML(self, url):
        """returns dom of info.xml document at url"""
        num_retries = 3
        dom = None
        metaUrl = None
        if url.startswith("http://"):
            # real URL
            metaUrl = url
        else:
            # online path
            server=self.digilibBaseUrl+"/servlet/Texter?fn="
            metaUrl=server+url.replace("/mpiwg/online","")
        
        for cnt in range(num_retries):
            try:
                # patch by dirk: encoding errors no longer occur this way
                # dom = NonvalidatingReader.parseUri(metaUrl)
                txt=urllib.urlopen(metaUrl).read()
                dom = Parse(txt)
                break
            except:
                logger("ERROR documentViewer (getPresentationInfoXML)", logging.INFO,"%s (%s)"%sys.exc_info()[0:2])
                
        if dom is None:
            raise IOError("Unable to read infoXMLfrom %s"%(url))
                 
        return dom
                        
        
    def getAuthinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
        """gets authorization info from the index.meta file at path or given by dom"""
        logger("documentViewer (getauthinfofromindexmeta)", logging.INFO,"path: %s"%(path))
        
        access = None
        
        if docinfo is None:
            docinfo = {}
            
        if dom is None:
            for x in range(cut):
                path=getParentDir(path)
            dom = self.getIndexMeta(path)
       
        acctype = dom.xpath("//access-conditions/access/@type")
        if acctype and (len(acctype)>0):
            access=acctype[0].value
            if access in ['group', 'institution']:
                access = getTextFromNode(dom.xpath("//access-conditions/access/name")[0]).lower()
            
        docinfo['accessType'] = access
        return docinfo
    
        
    def getBibinfoFromIndexMeta(self,path,docinfo=None,dom=None,cut=0):
        """gets bibliographical info from the index.meta file at path or given by dom"""
        logging.debug("documentViewer (getbibinfofromindexmeta) path: %s"%(path))
        
        if docinfo is None:
            docinfo = {}
        
        if dom is None:
            for x in range(cut):
                path=getParentDir(path)
            dom = self.getIndexMeta(path)
        
        logging.debug("documentViewer (getbibinfofromindexmeta cutted) path: %s"%(path))
        # put in all raw bib fields as dict "bib"
        bib = dom.xpath("//bib/*")
        if bib and len(bib)>0:
            bibinfo = {}
            for e in bib:
                bibinfo[e.localName] = getTextFromNode(e)
            docinfo['bib'] = bibinfo
        
        # extract some fields (author, title, year) according to their mapping
        metaData=self.metadata.main.meta.bib
        bibtype=dom.xpath("//bib/@type")
        if bibtype and (len(bibtype)>0):
            bibtype=bibtype[0].value
        else:
            bibtype="generic"
            
        bibtype=bibtype.replace("-"," ") # index.meta uses "-" instead of " " in type names (not wrong! ROC)
        docinfo['bib_type'] = bibtype
        bibmap=metaData.generateMappingForType(bibtype)
        # if there is no mapping bibmap is empty (mapping sometimes has empty fields)
        if len(bibmap) > 0 and len(bibmap['author'][0]) > 0:
            try:
                docinfo['author']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['author'][0])[0])
            except: pass
            try:
                docinfo['title']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['title'][0])[0])
            except: pass
            try:
                docinfo['year']=getTextFromNode(dom.xpath("//bib/%s"%bibmap['year'][0])[0])
            except: pass
            logging.debug("documentViewer (getbibinfofromindexmeta) using mapping for %s"%bibtype)
            try:
                docinfo['lang']=getTextFromNode(dom.xpath("//bib/lang")[0])
            except:
                docinfo['lang']=''

        return docinfo
    
    
    def getDocinfoFromTextTool(self, url, dom=None, docinfo=None):
        """parse texttool tag in index meta"""
        logger("documentViewer (getdocinfofromtexttool)", logging.INFO, "url: %s" % (url))
        if docinfo is None:
            docinfo = {}
        if docinfo.get('lang', None) is None:
            docinfo['lang'] = '' # default: no language set
        if dom is None:
            dom = self.getIndexMeta(url)
        
        archivePath = None
        archiveName = None
    
        archiveNames = dom.xpath("//resource/name")
        if archiveNames and (len(archiveNames) > 0):
            archiveName = getTextFromNode(archiveNames[0])
        else:
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/name missing in: %s" % (url))
        
        archivePaths = dom.xpath("//resource/archive-path")
        if archivePaths and (len(archivePaths) > 0):
            archivePath = getTextFromNode(archivePaths[0])
            # clean up archive path
            if archivePath[0] != '/':
                archivePath = '/' + archivePath
            if archiveName and (not archivePath.endswith(archiveName)):
                archivePath += "/" + archiveName
        else:
            # try to get archive-path from url
            logger("documentViewer (getdocinfofromtexttool)", logging.WARNING, "resource/archive-path missing in: %s" % (url))
            if (not url.startswith('http')):
                archivePath = url.replace('index.meta', '')
                
        if archivePath is None:
            # we balk without archive-path
            raise IOError("Missing archive-path (for text-tool) in %s" % (url))
        
        imageDirs = dom.xpath("//texttool/image")
        if imageDirs and (len(imageDirs) > 0):
            imageDir = getTextFromNode(imageDirs[0])
            
        else:
            # no image tag: used to be an error, not necessary anymore because text mode is now standard
            #raise IOError("No text-tool info in %s"%(url))
            imageDir = ""
            #xquery="//pb"  
            docinfo['imagePath'] = "" # keine Bilder
            docinfo['imageURL'] = ""
            
        if imageDir and archivePath:
            #print "image: ", imageDir, " archivepath: ", archivePath
            imageDir = os.path.join(archivePath, imageDir)
            imageDir = imageDir.replace("/mpiwg/online", '')
            docinfo = self.getDirinfoFromDigilib(imageDir, docinfo=docinfo)
            docinfo['imagePath'] = imageDir
            
            docinfo['imageURL'] = self.digilibBaseUrl + "/servlet/Scaler?fn=" + imageDir
            
        viewerUrls = dom.xpath("//texttool/digiliburlprefix")
        if viewerUrls and (len(viewerUrls) > 0):
            viewerUrl = getTextFromNode(viewerUrls[0])
            docinfo['viewerURL'] = viewerUrl
                   
        textUrls = dom.xpath("//texttool/text")
        if textUrls and (len(textUrls) > 0):
            textUrl = getTextFromNode(textUrls[0])
            if urlparse.urlparse(textUrl)[0] == "": #keine url
                textUrl = os.path.join(archivePath, textUrl) 
            # fix URLs starting with /mpiwg/online
            if textUrl.startswith("/mpiwg/online"):
                textUrl = textUrl.replace("/mpiwg/online", '', 1)
            
            docinfo['textURL'] = textUrl
    
        textUrls = dom.xpath("//texttool/text-url-path")
        if textUrls and (len(textUrls) > 0):
            textUrl = getTextFromNode(textUrls[0])
            docinfo['textURLPath'] = textUrl   
         
        presentationUrls = dom.xpath("//texttool/presentation")
        docinfo = self.getBibinfoFromIndexMeta(url, docinfo=docinfo, dom=dom)   # get info from the bib tag
        
        if presentationUrls and (len(presentationUrls) > 0): # overwrite these with the presentation information
             # the presentation url is obtained by replacing index.meta in the metadata url
             # with the relative path to the presentation info
            presentationPath = getTextFromNode(presentationUrls[0])
            if url.endswith("index.meta"): 
                presentationUrl = url.replace('index.meta', presentationPath)
            else:
                presentationUrl = url + "/" + presentationPath
            docinfo = self.getNumPages(docinfo) # for the moment simply set it; navigation via the thumbs obviously does not work then
            docinfo = self.getBibinfoFromTextToolPresentation(presentationUrl, docinfo=docinfo, dom=dom)
    
        docinfo = self.getAuthinfoFromIndexMeta(url, docinfo=docinfo, dom=dom)   # get access info
        
        return docinfo
   
   
    def getBibinfoFromTextToolPresentation(self,url,docinfo=None,dom=None):
        """gets the bibliographical information from the preseantion entry in texttools
        """
        dom=self.getPresentationInfoXML(url)
        try:
            docinfo['author']=getTextFromNode(dom.xpath("//author")[0])
        except:
            pass
        try:
            docinfo['title']=getTextFromNode(dom.xpath("//title")[0])
        except:
            pass
        try:
            docinfo['year']=getTextFromNode(dom.xpath("//date")[0])
        except:
            pass
        return docinfo
    
    def getDocinfoFromImagePath(self,path,docinfo=None,cut=0):
        """path ist the path to the images it assumes that the index.meta file is one level higher."""
        logger("documentViewer (getdocinfofromimagepath)", logging.INFO,"path: %s"%(path))
        if docinfo is None:
            docinfo = {}
        path=path.replace("/mpiwg/online","")
        docinfo['imagePath'] = path
        docinfo=self.getDirinfoFromDigilib(path,docinfo=docinfo,cut=cut)
        
        pathorig=path
        for x in range(cut):       
                path=getParentDir(path)
        logging.error("PATH:"+path)
        imageUrl=self.digilibBaseUrl+"/servlet/Scaler?fn="+path
        docinfo['imageURL'] = imageUrl
        
        # the index.meta file is assumed to be one level above the image path
        docinfo = self.getBibinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
        docinfo = self.getAuthinfoFromIndexMeta(pathorig,docinfo=docinfo,cut=cut+1)
        return docinfo
    
    
    def getDocinfo(self, mode, url):
        """returns docinfo depending on mode"""
        logger("documentViewer (getdocinfo)", logging.INFO,"mode: %s, url: %s"%(mode,url))
        # look for cached docinfo in session
        if self.REQUEST.SESSION.has_key('docinfo'):
            docinfo = self.REQUEST.SESSION['docinfo']
            # check if its still current
            if docinfo is not None and docinfo.get('mode') == mode and docinfo.get('url') == url:
                logger("documentViewer (getdocinfo)", logging.INFO,"docinfo in session: %s"%docinfo)
                return docinfo
        # new docinfo
        docinfo = {'mode': mode, 'url': url}
        if mode=="texttool": #index.meta with texttool information
            docinfo = self.getDocinfoFromTextTool(url, docinfo=docinfo)
        elif mode=="imagepath":
            docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo)
        elif mode=="filepath":
            docinfo = self.getDocinfoFromImagePath(url, docinfo=docinfo,cut=1)
        else:
            logger("documentViewer (getdocinfo)", logging.ERROR,"unknown mode!")
            raise ValueError("Unknown mode %s! Has to be one of 'texttool','imagepath','filepath'."%(mode))
                        
        logger("documentViewer (getdocinfo)", logging.INFO,"docinfo: %s"%docinfo)
        self.REQUEST.SESSION['docinfo'] = docinfo
        return docinfo
        
        
    def getPageinfo(self, current, start=None, rows=None, cols=None, docinfo=None, viewMode=None, tocMode=None):
        """returns pageinfo with the given parameters"""
        pageinfo = {}
        current = getInt(current)
        pageinfo['current'] = current
        rows = int(rows or self.thumbrows)
        pageinfo['rows'] = rows
        cols = int(cols or self.thumbcols)
        pageinfo['cols'] = cols
        grpsize = cols * rows
        pageinfo['groupsize'] = grpsize
        start = getInt(start, default=(math.ceil(float(current)/float(grpsize))*grpsize-(grpsize-1)))
        # int(current / grpsize) * grpsize +1))
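        # e.g. with the default thumbcols=2, thumbrows=5 -> grpsize=10:
        # current=13 gives start=11 and end=21 (capped to numPages below)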
        pageinfo['start'] = start
        pageinfo['end'] = start + grpsize
        if (docinfo is not None) and ('numPages' in docinfo):
            np = int(docinfo['numPages'])
            pageinfo['end'] = min(pageinfo['end'], np)
            pageinfo['numgroups'] = int(np / grpsize)
            if np % grpsize > 0:
                pageinfo['numgroups'] += 1
                
        pageinfo['viewMode'] = viewMode
        pageinfo['tocMode'] = tocMode
        pageinfo['query'] = self.REQUEST.get('query',' ')
        pageinfo['queryType'] = self.REQUEST.get('queryType',' ')
        pageinfo['querySearch'] =self.REQUEST.get('querySearch', 'fulltext')
        pageinfo['tocPageSize'] = self.REQUEST.get('tocPageSize', '30')
        pageinfo['queryPageSize'] =self.REQUEST.get('queryPageSize', '20')
        pageinfo['tocPN'] = self.REQUEST.get('tocPN', '1')
        pageinfo['searchPN'] =self.REQUEST.get('searchPN','1')
        pageinfo['sn'] =self.REQUEST.get('sn','1')

        return pageinfo
                
    def getSearch(self, pn=1, pageinfo=None,  docinfo=None, query=None, queryType=None):
        """get search list"""
        docpath = docinfo['textURLPath'] 
        pagesize = pageinfo['queryPageSize']
        pn = pageinfo['searchPN']
        sn = pageinfo['sn']
        query =pageinfo['query']
        queryType =pageinfo['queryType']
    
        pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql","document=%s&mode=%s&queryType=%s&query=%s&queryResultPageSize=%s&queryResultPN=%s&sn=%s"%(docpath, 'text', queryType, query, pagesize, pn, sn) ,outputUnicode=False)           
        pagedom = Parse(pagexml)
        #pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
        
        return pagexml
        #if len(pagedivs) > 0:
        #    pagenode = pagedom[0]
        #    return serializeNode(pagenode)
        #else:
        #    return "xaxa"

    def getNumPages(self,docinfo=None):
        """get list of pages from fulltext and put in docinfo"""
        xquery = '//pb'
        text = self.template.fulltextclient.eval("/mpdl/interface/xquery.xql", "document=%s&xquery=%s"%(docinfo['textURLPath'],xquery))
        # TODO: better processing of the page list. do we need the info somewhere else also?
        docinfo['numPages'] = text.count("<pb ")
        return docinfo
       
    def getTextPage(self, mode="text", pn=1, docinfo=None):
        """returns single page from fulltext"""
        docpath = docinfo['textURLPath']
        if mode == "text_dict":
            textmode = "textPollux"
        else:
            textmode = mode
            
        pagexml=self.template.fulltextclient.eval("/mpdl/interface/page-fragment.xql", "document=%s&mode=%s&pn=%s"%(docpath,textmode,pn), outputUnicode=False)
        #######
        #textpython = pagexml.replace('page-fragment.xql?document=/echo/la/Benedetti_1585.xml','?url=/mpiwg/online/permanent/library/163127KK&tocMode='+str(tocMode)+'&queryResultPN='+str(queryResultPN)+'&viewMode='+str(viewMode))
        #textnew =textpython.replace('mode=text','mode=texttool')
        #######
        # post-processing downloaded xml
        pagedom = Parse(pagexml)
        # plain text mode
        if mode == "text":
            # first div contains text
            pagedivs = pagedom.xpath("/div")
            #queryResultPage
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                return serializeNode(pagenode)

        # text-with-links mode
        if mode == "text_dict":
            # first div contains text
            pagedivs = pagedom.xpath("/div")
            if len(pagedivs) > 0:
                pagenode = pagedivs[0]
                # check all a-tags
                links = pagenode.xpath("//a")
                for l in links:
                    hrefNode = l.getAttributeNodeNS(None, u"href")
                    if hrefNode:
                        # is link with href
                        href = hrefNode.nodeValue
                        if href.startswith('lt/lex.xql'):
                            # is pollux link
                            selfurl = self.absolute_url()
                            # change href
                            hrefNode.nodeValue = href.replace('lt/lex.xql','%s/head_main_voc'%selfurl)
                            # add target
                            l.setAttributeNS(None, 'target', '_blank')
                return serializeNode(pagenode)
        
        return "no text here"

    def getToc(self, mode="text", docinfo=None):
        """loads table of contents and stores in docinfo"""
        logging.debug("documentViewer (gettoc) mode: %s"%(mode))
        if 'tocSize_%s'%mode in docinfo:
            # cached toc
            return docinfo
        
        docpath = docinfo['textURLPath']
        # we need to set a result set size
        pagesize = 1000
        pn = 1
        if mode == "text":
            queryType = "toc"
        else:
            queryType = mode
        # number of entries in toc
        tocSize = 0
        tocDiv = None
        pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType,pagesize,pn), outputUnicode=False)
        # post-processing downloaded xml
        pagedom = Parse(pagexml)
        # get number of entries
        numdivs = pagedom.xpath("//div[@class='queryResultHits']")
        if len(numdivs) > 0:
            tocSize = int(getTextFromNode(numdivs[0]))
            # div contains text
            #pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
            #if len(pagedivs) > 0:
            #    tocDiv = pagedivs[0]

        docinfo['tocSize_%s'%mode] = tocSize
        #docinfo['tocDiv_%s'%mode] = tocDiv
        return docinfo
    
    def getTocPage(self, mode="text", pn=1, pageinfo=None, docinfo=None):
        """returns single page from the table of contents"""
        # TODO: this should use the cached TOC
        if mode == "text":
            queryType = "toc"
        else:
            queryType = mode
        docpath = docinfo['textURLPath']
        pagesize = pageinfo['tocPageSize']
        pn = pageinfo['tocPN']
        pagexml=self.template.fulltextclient.eval("/mpdl/interface/doc-query.xql", "document=%s&queryType=%s&queryResultPageSize=%s&queryResultPN=%s"%(docpath,queryType, pagesize, pn), outputUnicode=False)
        # post-processing downloaded xml
        pagedom = Parse(pagexml)
        # div contains text
        pagedivs = pagedom.xpath("//div[@class='queryResultPage']")
        if len(pagedivs) > 0:
            pagenode = pagedivs[0]
            return serializeNode(pagenode)
        else:
            return "No TOC!"

    
    def changeDocumentViewer(self,title="",digilibBaseUrl=None,thumbrows=2,thumbcols=5,authgroups='mpiwg',RESPONSE=None):
        """init document viewer"""
        self.title=title
        self.digilibBaseUrl = digilibBaseUrl
        self.thumbrows = thumbrows
        self.thumbcols = thumbcols
        self.authgroups = [s.strip().lower() for s in authgroups.split(',')]
        if RESPONSE is not None:
            RESPONSE.redirect('manage_main')
    
    
        
def manage_AddDocumentViewerForm(self):
    """add the viewer form"""
    pt=PageTemplateFile('zpt/addDocumentViewer', globals()).__of__(self)
    return pt()
  
def manage_AddDocumentViewer(self,id,imageScalerUrl="",textServerName="",title="",RESPONSE=None):
    """add the viewer"""
    newObj=documentViewer(id,imageScalerUrl=imageScalerUrl,title=title,textServerName=textServerName)
    self._setObject(id,newObj)
    
    if RESPONSE is not None:
        RESPONSE.redirect('manage_main')


##
## DocumentViewerTemplate class
##
class DocumentViewerTemplate(ZopePageTemplate):
    """Template for document viewer"""
    meta_type="DocumentViewer Template"


def manage_addDocumentViewerTemplateForm(self):
    """Form for adding"""
    pt=PageTemplateFile('zpt/addDocumentViewerTemplate', globals()).__of__(self)
    return pt()

def manage_addDocumentViewerTemplate(self, id='viewer_main', title=None, text=None,
                           REQUEST=None, submit=None):
    "Add a Page Template with optional file content."

    self._setObject(id, DocumentViewerTemplate(id))
    ob = getattr(self, id)
    txt=file(os.path.join(package_home(globals()),'zpt/viewer_main.zpt'),'r').read()
    logging.info("txt %s:"%txt)
    ob.pt_edit(txt,"text/html")
    if title:
        ob.pt_setTitle(title)
    try:
        u = self.DestinationURL()
    except AttributeError:
        u = REQUEST['URL1']
        
    u = "%s/%s" % (u, urllib.quote(id))
    REQUEST.RESPONSE.redirect(u+'/manage_main')
    return ''


    
